//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
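
// For example, RZSize2Log(64) == Log2(64) - 4 == 2, and RZLog2Size(2) ==
// 16 << 2 == 64; the eight encodable sizes are 16, 32, 64, 128, 256, 512,
// 1024, and 2048 bytes.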

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
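
// Packing both halves into one u64 keeps the pair consistent without a lock:
// e.g. AtomicContextStore(&ctx, /*tid=*/0x2a, /*stack=*/0x1111) publishes the
// single value 0x0000002a00001111, which AtomicContextLoad splits back into
// tid == 0x2a and stack == 0x1111.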

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'

class ChunkHeader {
 public:
  atomic_uint8_t chunk_state;
  u8 alloc_type : 2;
  u8 lsan_tag : 2;

  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u8 user_requested_alignment_log : 3;

 private:
  u16 user_requested_size_hi;
  u32 user_requested_size_lo;
  atomic_uint64_t alloc_context_id;

 public:
  uptr UsedSize() const {
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
           user_requested_size_lo;
  }

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
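
// Splitting the user-requested size into a 16-bit high half and a 32-bit low
// half keeps ChunkHeader at exactly 16 bytes; on 64-bit targets this caps a
// tracked size at 2^48 - 1. For example, SetUsedSize(0x100000002) stores
// user_requested_size_hi == 1 and user_requested_size_lo == 2.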

enum {
  // Either just allocated by the underlying allocator, but AsanChunk is not
  // yet ready, or almost returned to the underlying allocator and AsanChunk
  // is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into the quarantine zone.
  CHUNK_QUARANTINE = 3,
};

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};
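
// Set(p) writes chunk_header before publishing kAllocBegMagic with a release
// store, so a reader whose acquire load in Get() observes the magic is
// guaranteed to also see the matching chunk_header; with any other magic
// value Get() returns nullptr.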

static void FillChunk(AsanChunk *m) {
  // FIXME: Use ReleaseMemoryPagesToOS.
  Flags &fl = *flags();

  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header, it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }
}

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void PreQuarantine(AsanChunk *m) const {
    FillChunk(m);
    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);
  }

  void Recycle(AsanChunk *m) const {
    void *p = get_allocator().GetBlockBegin(m);

    // The secondary allocator will immediately unpoison and unmap the memory,
    // so this branch is unnecessary for it.
    if (get_allocator().FromPrimary(p)) {
      if (p != m) {
        // Clear the magic value, as allocator internals may overwrite the
        // contents of the deallocated chunk, confusing the GetAsanChunk
        // lookup.
        reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
      }

      u8 old_chunk_state = CHUNK_QUARANTINE;
      if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                          CHUNK_INVALID,
                                          memory_order_acquire)) {
        CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
      }

      PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                   kAsanHeapLeftRedzoneMagic);
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void RecyclePassThrough(AsanChunk *m) const {
    // Recycle for the secondary allocator will immediately unpoison and unmap
    // the memory, so quarantine preparation is unnecessary there.
    if (get_allocator().FromPrimary(m)) {
      // The primary allocation may need a pattern fill if enabled.
      FillChunk(m);
    }
    Recycle(m);
  }

  void *Allocate(uptr size) const {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
                                          uptr user_size) const {
  uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  // The secondary mapping will be immediately returned to the user, so there
  // is no value in poisoning it with a non-zero pattern just before Allocate()
  // unpoisons it. Just poison the head and tail that are invisible to
  // Allocate().
  PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
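
// For example, with ASAN_SHADOW_GRANULARITY == 8, a secondary mapping at
// p == 0x1000 with size == 0x3000, user_begin == 0x1234 and
// user_size == 0x1000 poisons the head [0x1000, 0x1238) and the tail
// [0x2230, 0x4000), leaving the aligned user region for Allocate().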

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
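
  // E.g. min_redzone == 16 with max_redzone == 2048 passes every check,
  // while a non-power-of-two setting such as redzone=24 trips the
  // IsPowerOfTwo CHECK at startup.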

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
    // In either case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
    options->thread_local_quarantine_size_kb =
        quarantine.GetMaxCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }
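
  // Worked example: a 100-byte request satisfies 100 <= 512 - 64, so
  // rz_log == 2 (a 64-byte redzone); hdr_log == RZSize2Log(16) == 0, and
  // with min_redzone == 16 and max_redzone == 2048 the clamp leaves
  // Min(Max(2, 0), 7) == 2.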

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
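
  // The 3-bit log survives a round trip for alignments 8..512: e.g.
  // ComputeUserRequestedAlignmentLog(16) == 2 and
  // ComputeUserAlignment(2) == 16, while alignments below 8 collapse to the
  // reserved value 0.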

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
    // available chunk.
    u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!AsanInited()))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!from_primary)
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
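    // E.g. malloc(100) with 8-byte alignment and a 64-byte redzone
    // (rz_log == 2): rounded_size == RoundUpTo(Max(100, 8), 8) == 104 and
    // needed_size == 168; such a request stays in the primary allocator, so
    // no extra right redzone is added here.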
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
      // The allocator provides an unpoisoned chunk. This is possible for the
      // secondary allocator, or if CanPoisonMemory() was false for some time,
      // for example, due to flags()->start_disabled. Anyway, poison left and
      // right of the block before using it for anything else.
      uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
      uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
      PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
    }

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }
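
  // Shadow example: for a 13-byte allocation with 8-byte granularity, the
  // first shadow byte becomes 0 (8 addressable bytes) and, when
  // fl.poison_partial is set, the next becomes 5 (13 & 7), marking a
  // partially addressable granule; the surrounding redzones keep
  // kAsanHeapLeftRedzoneMagic.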

  // Set the quarantine flag if the chunk is allocated; issue an ASan error
  // report on available and quarantined chunks. Return true on success,
  // false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid a race on double-free.
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
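
  // On a double-free, the second caller finds chunk_state already equal to
  // CHUNK_QUARANTINE: the compare-exchange fails, old_chunk_state holds the
  // observed state, and ReportInvalidFree flags it as a double-free instead
  // of corrupting the quarantine.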

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    if (RunFreeHooks(ptr)) {
      // Someone used __sanitizer_ignore_free_hook() and decided that they
      // didn't want the memory to be freed right now. When they call free()
      // on this pointer again at a later time, we should ignore the
      // alloc-type mismatch and allow them to deallocate the pointer through
      // free(), rather than the initial alloc type.
      m->alloc_type = FROM_MALLOC;
      return;
    }

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine the given chunk if we failed to set the
    // CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report the racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
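
  // CheckForCallocOverflow guards the nmemb * size product: e.g. on a 64-bit
  // target calloc(1ULL << 33, 1ULL << 33) wraps to 0 in the multiplication,
  // so the request is rejected (or nullptr is returned) instead of handing
  // out a tiny block.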

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but the chunk is
    // definitely not initialized for any other value.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }
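
  // Lookup thus has two shapes: if the block begin carries a valid
  // LargeChunkHeader magic, the chunk pointer stored there is used;
  // otherwise a primary block's begin is taken to be the AsanChunk itself,
  // and the chunk_state check filters out blocks that are not live chunks.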

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk before.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}
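
// The asan_* entry points below back the intercepted allocation calls: for
// example, an intercepted malloc(n) is forwarded to asan_malloc(n, &stack),
// and the matching free(p) reaches asan_free(p, &stack, FROM_MALLOC).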
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we should not free() on realloc(p, 0).
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}
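// Illustrative sketch (assumes a 4096-byte page size, which varies by
// platform): the difference between the two page-aligned entry points.
// valloc only aligns the start of the block; pvalloc (below) also rounds
// the size up to a whole number of pages, and pvalloc(0) yields one page.
#if 0
void *a = asan_valloc(100, &stack);   // 100 requested bytes, 4096-aligned.
void *b = asan_pvalloc(100, &stack);  // Size rounded up to 4096 bytes.
void *c = asan_pvalloc(0, &stack);    // One full page (4096 bytes).
#endif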
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size,
                         BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
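// Illustrative sketch (not part of this file): the POSIX contract that the
// function below implements. Errors come back as the return value rather
// than through errno, the alignment must be a power of two that is also a
// multiple of sizeof(void *), and *memptr is only written on success.
#if 0
void *p = nullptr;
if (posix_memalign(&p, 64, 1024) == 0) {
  // p is non-null and 64-byte aligned.
}
#endif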
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}
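// Illustrative sketch (not part of this file): what PointsIntoChunk gives
// LSan's leak scanner. Interior pointers keep a chunk reachable, and the
// function canonicalizes them to the chunk's user-begin address; pointers
// outside the user region (including one-past-the-end) do not count.
#if 0
char *p = static_cast<char *>(malloc(32));
uptr beg = PointsIntoChunk(p + 8);   // == (uptr)p: interior pointer.
uptr out = PointsIntoChunk(p + 32);  // == 0: one past the end.
#endif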
uptr GetUserBegin(uptr chunk) {
  // FIXME: All use cases provide the chunk address, so
  // GetAsanChunkByAddrFastLocked is not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
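// Illustrative sketch (not part of this file): the client-side view of the
// public interface implemented below, as declared in
// <sanitizer/allocator_interface.h>. Note that the size query expects the
// allocation's begin address, which the begin query recovers from any
// interior pointer.
#if 0
char *p = static_cast<char *>(malloc(100));
const void *beg = __sanitizer_get_allocated_begin(p + 10);  // == p
uptr size = __sanitizer_get_allocated_size(beg);            // == 100
int owned = __sanitizer_get_ownership(p);                   // == 1
#endif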
static const void *AllocationBegin(const void *p) {
  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  if (!m)
    return nullptr;
  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
    return nullptr;
  if (m->UsedSize() == 0)
    return nullptr;
  return (const void *)(m->Beg());
}

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void* addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}
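// Illustrative sketch (not part of this file): a typical use of
// __asan_update_allocation_context. A pool allocator handing out cached
// blocks can re-stamp each block with the current stack, so later ASan
// reports blame the pool's caller rather than the original allocation
// site; the call returns 1 if the chunk was found and updated. `Pool`
// and its methods are hypothetical names.
#if 0
void *PoolTakeBlock(Pool *pool) {
  void *block = pool->PopFreeBlock();
  __asan_update_allocation_context(block);
  return block;
}
#endif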