//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

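// Declared directly so this file does not have to pull in <string.h>.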
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
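// Architecture-specific cap on a single allocation; anything larger is
// reported and rejected via the failure handler in Allocate() below.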
#if defined(__i386__) || defined(__arm__) || \
    ((defined(__sparc__) || defined(__powerpc__)) && !defined(_LP64))
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#elif defined(_LP64)
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 20;
#endif
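// Small requests are served from the size-class-based PrimaryAllocator
// (defined in lsan_allocator.h); everything else falls back to mmap through
// the secondary allocator.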
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

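// Fill in the chunk metadata, then flip the 'allocated' flag (the first byte
// of ChunkMetadata, see lsan_allocator.h) last with an atomic store, so a
// concurrent leak scan never observes a live chunk with half-initialized
// metadata.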
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return Allocator::FailureHandler::OnBadRequest();
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
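  // Invoke the weak hook directly, then any hooks registered at run time
  // through __sanitizer_install_malloc_and_free_hooks().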
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

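// calloc(): reject nmemb * size combinations that would overflow before
// performing the multiplication.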
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
    return Allocator::FailureHandler::OnBadRequest();
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

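// Run the free hooks and clear the metadata's 'allocated' bit before the
// chunk is actually returned to the allocator.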
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(GetAllocatorCache(), p);
    return Allocator::FailureHandler::OnBadRequest();
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

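// Report the address range of the per-thread allocator cache so the leak
// scanner can exclude it from the scanned TLS range (see ProcessThreads() in
// lsan_common.cc); pointers held in the allocator's internal caches must not
// count as live references.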
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

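// Note: this returns the size the user originally requested, not the
// (possibly larger) size the allocator rounded it up to.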
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

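// Report the allocator's own globals so ScanGlobalRange() can carve this
// region out of the root set; otherwise pointers held in the allocator's
// internal data structures could make leaked chunks appear reachable.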
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

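// Map an arbitrary interior pointer to the start of its chunk, or return 0 if
// it does not point into a live (allocated) chunk. Zero-sized allocations
// coming from operator new(0) are special-cased via
// IsSpecialCaseOfOperatorNew0().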
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

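// Chunk metadata lives out of line in the allocator's metadata area, so user
// data starts at the chunk itself.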
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

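// Backs __lsan_ignore_object(): tag a live chunk as kIgnored so it is
// excluded from leak reports. Called with the allocator locked.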
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
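// Allocator statistics for the public sanitizer interface. LSan only tracks
// currently-allocated and mapped bytes; the remaining counters are trivially
// zero.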
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"