//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
#  define LIBC_MALLOC __libc_malloc
#  define LIBC_FREE __libc_free
# else
#  include <stdlib.h>
#  define LIBC_MALLOC malloc
#  define LIBC_FREE free
# endif

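// In this mode the internal allocator is just a thin shim over libc: the
// cache argument is accepted for interface compatibility and ignored.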
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

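// No allocator instance exists in this mode, so hand out a null pointer;
// InternalAlloc/InternalFree below only go through the Raw* shims anyway.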
InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO

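// The allocator instance lives in a static, cache-line-aligned buffer so it
// is usable before any heap exists; Init() runs lazily on first use.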
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

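// Returns the lazily-initialized allocator instance. Classic double-checked
// locking: the acquire load keeps the common path lock-free once the release
// store has published the initialized state.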
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init();
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

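// Callers that have no per-thread cache share the global cache, which is
// serialized with a spin lock; callers with their own cache skip the lock.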
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO

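// Each block handed out by InternalAlloc() is prefixed with an 8-byte magic
// header so that InternalFree() can detect pointers that were not allocated
// here (or were already freed, since the header is zeroed on free).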
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (size + sizeof(u64) < size)  // Adding the header would overflow.
    return 0;
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (p == 0)
    return 0;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == 0)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;  // Clear the magic so double frees are caught too.
  RawInternalFree(addr, cache);
}
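
// Usage sketch (illustrative only; these helpers are internal to the
// run-time and are not part of any public sanitizer interface):
//   void *p = InternalAlloc(128);  // block carries a hidden magic header
//   ...
//   InternalFree(p);               // CHECK-fails on a foreign pointer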

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

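// Bump allocator backed directly by mmap: memory is handed out linearly and
// is never unmapped. When the current mapping cannot satisfy a request, its
// unused tail is simply abandoned and a fresh mapping is made.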
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    // Map at least a page; the remainder of the old chunk is abandoned.
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ = (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

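// Returns true when n * size would overflow uptr, in which case calloc-style
// interfaces should fail. Dividing instead of multiplying avoids the very
// overflow being tested for: e.g. on 64-bit, size = 16 and n = 2^61 give
// max / 16 < 2^61, so the product cannot be represented.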
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

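// Central out-of-memory policy: return null if the user set
// allocator_may_return_null=1, otherwise report and abort via CHECK(0).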
void *AllocatorReturnNull() {
  if (common_flags()->allocator_may_return_null)
    return 0;
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  return 0;
}

}  // namespace __sanitizer