//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
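
// posix_memalign requires the alignment to be a power of two and a multiple
// of sizeof(void *); on failure it returns an error code and leaves 'p'
// unset, so the shim above maps any failure to a nullptr result, matching
// the __libc_memalign contract used on the glibc path.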
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but blocks allocated with it cannot be passed to free();
  // they must be passed to _aligned_free, and the InternalAlloc interface
  // does not account for that requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return nullptr;
}
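
// In this configuration there is no InternalAllocator instance at all: the
// Raw* helpers above go straight to libc, so internal_allocator() returns a
// null pointer and its result must not be dereferenced.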

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}
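
// internal_allocator() uses double-checked locking: the acquire load is the
// uncontended fast path, the spin lock serializes first-time initialization,
// the relaxed re-check prevents a second Init() by threads that raced on the
// lock, and the release store publishes the initialized instance to later
// acquire loads.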

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
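
// Each block returned by InternalAlloc() carries an 8-byte header holding
// kBlockMagic immediately before the user data:
//
//   [ u64 kBlockMagic ][ user data ... ]
//                       ^ pointer handed to the caller
//
// InternalRealloc() and InternalFree() verify the magic before touching a
// block, which catches pointers that did not come from this allocator.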

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
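  // Reject requests whose size would wrap around once the 8-byte magic
  // header is added.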
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}
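
// Usage sketch (illustrative only, not part of the runtime): blocks obtained
// here must be released through InternalFree(), never plain free(), because
// of the magic header:
//
//   InternalAllocatorCache *cache = nullptr;  // or a per-thread cache
//   void *p = InternalAlloc(128, cache);
//   p = InternalRealloc(p, 256, cache);
//   InternalFree(p, cache);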

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
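  // Clear the magic so a second free of the same block fails the CHECK_EQ
  // above instead of going unnoticed.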
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}
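
// LowLevelAllocator is a bump allocator over chunks obtained with MmapOrDie:
// it never frees, and when a chunk cannot satisfy the next request the
// remaining tail is simply abandoned, which is acceptable for the small,
// process-lifetime allocations it serves.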

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
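
// A hypothetical callback, for illustration only (real clients perform
// tool-specific bookkeeping on the newly mapped region):
//
//   static void LogLowLevelAlloc(uptr start, uptr size) {
//     Report("LowLevelAllocator mapped %p..%p\n", (void *)start,
//            (void *)(start + size));
//   }
//   ...
//   SetLowLevelAllocateCallback(LogLowLevelAlloc);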

// Support for handling allocator out-of-memory and other errors.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}
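
// Relaxed ordering suffices for both flags above: each is an independent
// boolean that publishes no other data, so no acquire/release pairing is
// needed.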

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

} // namespace __sanitizer