//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

using namespace __msan;

namespace {
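// Per-chunk metadata the allocator stores next to each allocation; it records
// the size the user originally requested.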
struct Metadata {
  uptr requested_size;
};

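// Map/unmap callbacks invoked by the sanitizer allocator. When a chunk is
// unmapped, its contents are unpoisoned and the corresponding shadow (and
// origin) pages are released back to the OS.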
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = __sanitizer::CompactSizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
};
using PrimaryAllocator = SizeClassAllocator32<AP32>;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
const uptr kAllocatorSpace = 0x700000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kMaxAllowedMallocSize = 1ULL << 40;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;

#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;

#elif defined(__powerpc64__)
const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__s390x__)
const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__aarch64__)
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
using PrimaryAllocator = SizeClassAllocator64<AP64>;
#endif
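// The combined allocator routes small requests to the size-class primary
// allocator configured above and large ones to the page-granular secondary
// allocator.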
using Allocator = CombinedAllocator<PrimaryAllocator>;
using AllocatorCache = Allocator::AllocatorCache;
}  // namespace

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

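// Initializes the allocator and derives the effective allocation size limit
// from max_allocation_size_mb, capped at the per-architecture maximum.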
void __msan::MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void __msan::LockAllocator() { allocator.ForceLock(); }

void __msan::UnlockAllocator() { allocator.ForceUnlock(); }

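// Returns the allocator cache embedded in the per-thread malloc storage.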
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

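// Common allocation path. Enforces the size and RSS limits, allocates from
// the thread-local cache (or the global fallback cache when there is no
// MsanThread), records the requested size in the chunk metadata, and then
// either zeroes/unpoisons the memory or poisons it and attaches a heap
// origin, depending on `zero` and the poison_in_malloc flag.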
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zero) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zero) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

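// Deallocation path. Optionally re-poisons primary-allocator chunks and tags
// them with a dealloc origin; secondary chunks are unmapped and unpoisoned by
// MsanMapUnmapCallback. The chunk is returned through the thread-local cache
// or the global fallback cache.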
void __msan::MsanDeallocate(BufferedStackTrace *stack, void *p) {
  DCHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator unmaps and unpoisons it via
  // MsanMapUnmapCallback, so there is no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  if (MsanThread *t = GetCurrentThread()) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

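// Resizes in place when the new size still fits into the chunk that was
// actually allocated; otherwise allocates a new chunk, copies the contents
// together with their shadow and origin, and frees the old chunk.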
static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

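// calloc() implementation: rejects nmemb * size overflow, then allocates
// zero-initialized memory.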
static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

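// Returns the start of the live heap allocation containing p, or null if p
// does not point into one.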
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  auto *b = reinterpret_cast<Metadata *>(allocator.GetMetaData(beg));
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return beg;
}

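// AllocationSizeFast assumes p is the beginning of a live allocation;
// AllocationSize additionally validates p and returns 0 for pointers that do
// not start a heap chunk.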
static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  if (allocator.GetBlockBegin(p) != p)
    return 0;
  return AllocationSizeFast(p);
}

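// Entry points called from the interceptors. They validate arguments, set
// errno on failure, and forward to the internal allocation routines above.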
void *__msan::msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *__msan::msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *__msan::msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *__msan::msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                                BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *__msan::msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *__msan::msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *__msan::msan_aligned_alloc(uptr alignment, uptr size,
                                 BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *__msan::msan_memalign(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int __msan::msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                                BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

extern "C" {
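// Public __sanitizer_* allocator introspection interface. Free and unmapped
// byte counts are not tracked and return a placeholder value.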
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
}
454