//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__) || \
    ((defined(__sparc__) || defined(__powerpc__)) && !defined(_LP64))
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#elif _LP64
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 20;
#endif
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
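  // Note: only primary-allocator chunks are cleared explicitly here; secondary
  // (mmap-backed) allocations are expected to come back zero-initialized from
  // the kernel, so an extra memset for them would be redundant.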
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"