1 //===-- asan_noinst_test.cc -----------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of AddressSanitizer, an address sanity checker.
11 //
12 // This test file should be compiled w/o asan instrumentation.
13 //===----------------------------------------------------------------------===//
14
15 #include "asan_allocator.h"
16 #include "asan_internal.h"
17 #include "asan_mapping.h"
18 #include "asan_test_utils.h"
19 #include <sanitizer/allocator_interface.h>
20
21 #include <assert.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h> // for memset()
25 #include <algorithm>
26 #include <vector>
27 #include <limits>
28
29 using namespace __sanitizer;
30
31 // ATTENTION!
32 // Please don't call intercepted functions (including malloc() and friends)
33 // in this test. The static runtime library is linked explicitly (without
34 // -fsanitize=address), thus the interceptors do not work correctly on OS X.
35
36 // Make sure __asan_init is called before any test case is run.
37 struct AsanInitCaller {
AsanInitCallerAsanInitCaller38 AsanInitCaller() {
39 __asan_init();
40 }
41 };
42 static AsanInitCaller asan_init_caller;
43
// Sanity check that death tests work at all in this uninstrumented setup
// before the real tests below rely on them.
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}
47
MallocStress(size_t n)48 static void MallocStress(size_t n) {
49 u32 seed = my_rand();
50 BufferedStackTrace stack1;
51 stack1.trace_buffer[0] = 0xa123;
52 stack1.trace_buffer[1] = 0xa456;
53 stack1.size = 2;
54
55 BufferedStackTrace stack2;
56 stack2.trace_buffer[0] = 0xb123;
57 stack2.trace_buffer[1] = 0xb456;
58 stack2.size = 2;
59
60 BufferedStackTrace stack3;
61 stack3.trace_buffer[0] = 0xc123;
62 stack3.trace_buffer[1] = 0xc456;
63 stack3.size = 2;
64
65 std::vector<void *> vec;
66 for (size_t i = 0; i < n; i++) {
67 if ((i % 3) == 0) {
68 if (vec.empty()) continue;
69 size_t idx = my_rand_r(&seed) % vec.size();
70 void *ptr = vec[idx];
71 vec[idx] = vec.back();
72 vec.pop_back();
73 __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
74 } else {
75 size_t size = my_rand_r(&seed) % 1000 + 1;
76 switch ((my_rand_r(&seed) % 128)) {
77 case 0: size += 1024; break;
78 case 1: size += 2048; break;
79 case 2: size += 4096; break;
80 }
81 size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
82 char *ptr = (char*)__asan::asan_memalign(alignment, size,
83 &stack2, __asan::FROM_MALLOC);
84 EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
85 vec.push_back(ptr);
86 ptr[0] = 0;
87 ptr[size-1] = 0;
88 ptr[size/2] = 0;
89 }
90 }
91 for (size_t i = 0; i < vec.size(); i++)
92 __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
93 }
94
95
// Single-threaded run of the randomized malloc/free stress; iteration count
// is reduced in low-memory configurations.
TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}
99
TEST(AddressSanitizer,ThreadedMallocStressTest)100 TEST(AddressSanitizer, ThreadedMallocStressTest) {
101 const int kNumThreads = 4;
102 const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
103 pthread_t t[kNumThreads];
104 for (int i = 0; i < kNumThreads; i++) {
105 PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
106 (void*)kNumIterations);
107 }
108 for (int i = 0; i < kNumThreads; i++) {
109 PTHREAD_JOIN(t[i], 0);
110 }
111 }
112
PrintShadow(const char * tag,uptr ptr,size_t size)113 static void PrintShadow(const char *tag, uptr ptr, size_t size) {
114 fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
115 uptr prev_shadow = 0;
116 for (sptr i = -32; i < (sptr)size + 32; i++) {
117 uptr shadow = __asan::MemToShadow(ptr + i);
118 if (i == 0 || i == (sptr)size)
119 fprintf(stderr, ".");
120 if (shadow != prev_shadow) {
121 prev_shadow = shadow;
122 fprintf(stderr, "%02x", (int)*(u8*)shadow);
123 }
124 }
125 fprintf(stderr, "\n");
126 }
127
// Disabled by default: a manual aid for eyeballing shadow encodings.
// Allocates buffers of every size in [1, 513] and prints the shadow while
// the buffer is live ("m") and again right after it is freed ("f").
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t sz = 1; sz <= 513; sz++) {
    char *buf = new char[sz];
    PrintShadow("m", (uptr)buf, sz);
    delete [] buf;
    // The pointer value is only used as an address to map; the memory
    // itself is not dereferenced here.
    PrintShadow("f", (uptr)buf, sz);
  }
}
136
// The quarantine delays reuse of freed memory: after freeing |p|, the same
// address must not come back from the allocator until many malloc/free
// cycles of the same size have passed, but it must eventually come back.
TEST(AddressSanitizer, QuarantineTest) {
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);

  const size_t max_i = 1 << 30;  // Give up after this many attempts.
  size_t i = 0;
  while (i < max_i) {
    void *q = __asan::asan_malloc(size, &stack);
    __asan::asan_free(q, &stack, __asan::FROM_MALLOC);
    if (q == p) break;
    i++;
  }
  EXPECT_GE(i, 10000U);  // Reuse must not happen too soon...
  EXPECT_LT(i, max_i);   // ...but must happen eventually.
}
155
156 #if !defined(__NetBSD__)
ThreadedQuarantineTestWorker(void * unused)157 void *ThreadedQuarantineTestWorker(void *unused) {
158 (void)unused;
159 u32 seed = my_rand();
160 BufferedStackTrace stack;
161 stack.trace_buffer[0] = 0x890;
162 stack.size = 1;
163
164 for (size_t i = 0; i < 1000; i++) {
165 void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
166 __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
167 }
168 return NULL;
169 }
170
// Check that the thread-local allocators are flushed when threads are
// destroyed: heap usage must stay bounded no matter how many short-lived
// worker threads have come and gone.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  // Run the routine once to warm up ASAN internal structures to get more
  // predictable incremental memory changes.
  pthread_t t;
  PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
  PTHREAD_JOIN(t, 0);

  const int n_threads = 3000;
  size_t mmaped1 = __sanitizer_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __sanitizer_get_heap_size();
    // Heap growth since the warm-up run must stay under 320 MB after every
    // thread; unbounded growth here would indicate leaked per-thread caches.
    // FIXME: figure out why this much headroom is required.
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}
191 #endif
192
ThreadedOneSizeMallocStress(void * unused)193 void *ThreadedOneSizeMallocStress(void *unused) {
194 (void)unused;
195 BufferedStackTrace stack;
196 stack.trace_buffer[0] = 0x890;
197 stack.size = 1;
198 const size_t kNumMallocs = 1000;
199 for (int iter = 0; iter < 1000; iter++) {
200 void *p[kNumMallocs];
201 for (size_t i = 0; i < kNumMallocs; i++) {
202 p[i] = __asan::asan_malloc(32, &stack);
203 }
204 for (size_t i = 0; i < kNumMallocs; i++) {
205 __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
206 }
207 }
208 return NULL;
209 }
210
// Runs the fixed-size malloc stress concurrently from four threads.
TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (int i = 0; i < kNumThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, ThreadedOneSizeMallocStress, 0);
  for (int i = 0; i < kNumThreads; i++)
    PTHREAD_JOIN(threads[i], 0);
}
221
TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  // NOTE(review): kHighShadowBeg/kShadowGapBeg presumably expand in terms of
  // kHighMemEnd, which is why this using-declaration is needed — confirm
  // against asan_mapping.h.
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  // An address inside any shadow/gap region must itself be reported as the
  // first poisoned address of the queried range.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}
232
233 // Test __asan_load1 & friends.
TEST(AddressSanitizer,LoadStoreCallbacks)234 TEST(AddressSanitizer, LoadStoreCallbacks) {
235 typedef void (*CB)(uptr p);
236 CB cb[2][5] = {
237 {
238 __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
239 }, {
240 __asan_store1, __asan_store2, __asan_store4, __asan_store8,
241 __asan_store16,
242 }
243 };
244
245 uptr buggy_ptr;
246
247 __asan_test_only_reported_buggy_pointer = &buggy_ptr;
248 BufferedStackTrace stack;
249 stack.trace_buffer[0] = 0x890;
250 stack.size = 1;
251
252 for (uptr len = 16; len <= 32; len++) {
253 char *ptr = (char*) __asan::asan_malloc(len, &stack);
254 uptr p = reinterpret_cast<uptr>(ptr);
255 for (uptr is_write = 0; is_write <= 1; is_write++) {
256 for (uptr size_log = 0; size_log <= 4; size_log++) {
257 uptr size = 1 << size_log;
258 CB call = cb[is_write][size_log];
259 // Iterate only size-aligned offsets.
260 for (uptr offset = 0; offset <= len; offset += size) {
261 buggy_ptr = 0;
262 call(p + offset);
263 if (offset + size <= len)
264 EXPECT_EQ(buggy_ptr, 0U);
265 else
266 EXPECT_EQ(buggy_ptr, p + offset);
267 }
268 }
269 }
270 __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
271 }
272 __asan_test_only_reported_buggy_pointer = 0;
273 }
274