//===-- asan_interface_test.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
//===----------------------------------------------------------------------===//
#include "asan_test_utils.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include <sanitizer/allocator_interface.h>
#include <sanitizer/asan_interface.h>
#include <vector>

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(0U, __sanitizer_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __sanitizer_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __sanitizer_get_allocated_size";
TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // equal the requested size.
  EXPECT_EQ(true, __sanitizer_get_ownership(array));
  EXPECT_EQ(kArraySize, __sanitizer_get_allocated_size(array));
  EXPECT_EQ(true, __sanitizer_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __sanitizer_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't allocate, or on
  // interior pointers (pointers not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__sanitizer_get_ownership(wild_addr));
  EXPECT_DEATH(__sanitizer_get_allocated_size(wild_addr),
               kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__sanitizer_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__sanitizer_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __sanitizer_get_allocated_size().
  EXPECT_FALSE(__sanitizer_get_ownership(NULL));
  EXPECT_EQ(0U, __sanitizer_get_allocated_size(NULL));

  // Once memory is freed, it's not owned, and calling GetAllocatedSize
  // on it is forbidden.
  free(array);
  EXPECT_FALSE(__sanitizer_get_ownership(array));
  EXPECT_DEATH(__sanitizer_get_allocated_size(array),
               kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, the pointer is owned and should have a valid
    // allocated size.
    EXPECT_TRUE(__sanitizer_get_ownership(zero_alloc));
    // The allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__sanitizer_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __sanitizer_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __sanitizer_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __sanitizer_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // The ASan allocator does not keep huge chunks in the free list; it unmaps
  // them. The chunk must be larger than the quarantine size, otherwise it
  // gets stuck in the quarantine instead of being unmapped.
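  // (The default quarantine is 256MB, controlled by the quarantine_size_mb
  // flag, so an allocation of 256MB + 1 byte is assumed to be large enough
  // to bypass it.)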
  static const size_t kLargeMallocSize = (1 << 28) + 1;  // 256M
  free(Ident(malloc(kLargeMallocSize)));  // Drain quarantine.
  size_t old_heap_size = __sanitizer_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __sanitizer_get_heap_size());
  }
}

#if !defined(__NetBSD__)
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
    (SANITIZER_WORDSIZE == 32) ? 40 : 200;

static void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __sanitizer_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __sanitizer_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}
#endif

static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
  fflush(0);  // On Windows, stderr doesn't flush on crash.
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan_address_is_poisoned(ptr + offset))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan_address_is_poisoned(ptr + offset))

#if !defined(ASAN_SHADOW_SCALE) || ASAN_SHADOW_SCALE == 3
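// The byte-exact poisoning checks below assume the default shadow granularity
// of 8 bytes (shadow scale 3); with a larger granularity the partially
// poisoned granules would not line up with the offsets used here.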
static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  char value;
  EXPECT_DEATH(value = Ident(array[40]), kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array - [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}
#endif  // !defined(ASAN_SHADOW_SCALE) || ASAN_SHADOW_SCALE == 3

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

#if !defined(ASAN_SHADOW_SCALE) || ASAN_SHADOW_SCALE == 3
// Make sure that each aligned block of size "2^granularity" doesn't have
// a "true" value before a "false" value.
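// This mirrors how ASan shadow memory works: within an 8-byte granule only a
// prefix of the bytes can be addressable, so a poisoned byte is never followed
// by an unpoisoned one inside the same granule. For example, with granularity
// 3 the expected pattern {F, F, T, F, T, T, T, T} is normalized to
// {F, F, F, F, T, T, T, T}.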
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
  free(arr);
}
#endif  // !defined(ASAN_SHADOW_SCALE) || ASAN_SHADOW_SCALE == 3

TEST(AddressSanitizerInterface, GlobalRedzones) {
  GOOD_ACCESS(glob1, 1 - 1);
  GOOD_ACCESS(glob2, 2 - 1);
  GOOD_ACCESS(glob3, 3 - 1);
  GOOD_ACCESS(glob4, 4 - 1);
  GOOD_ACCESS(glob5, 5 - 1);
  GOOD_ACCESS(glob6, 6 - 1);
  GOOD_ACCESS(glob7, 7 - 1);
  GOOD_ACCESS(glob8, 8 - 1);
  GOOD_ACCESS(glob9, 9 - 1);
  GOOD_ACCESS(glob10, 10 - 1);
  GOOD_ACCESS(glob11, 11 - 1);
  GOOD_ACCESS(glob12, 12 - 1);
  GOOD_ACCESS(glob13, 13 - 1);
  GOOD_ACCESS(glob14, 14 - 1);
  GOOD_ACCESS(glob15, 15 - 1);
  GOOD_ACCESS(glob16, 16 - 1);
  GOOD_ACCESS(glob17, 17 - 1);
  GOOD_ACCESS(glob1000, 1000 - 1);
  GOOD_ACCESS(glob10000, 10000 - 1);
  GOOD_ACCESS(glob100000, 100000 - 1);

  BAD_ACCESS(glob1, 1);
  BAD_ACCESS(glob2, 2);
  BAD_ACCESS(glob3, 3);
  BAD_ACCESS(glob4, 4);
  BAD_ACCESS(glob5, 5);
  BAD_ACCESS(glob6, 6);
  BAD_ACCESS(glob7, 7);
  BAD_ACCESS(glob8, 8);
  BAD_ACCESS(glob9, 9);
  BAD_ACCESS(glob10, 10);
  BAD_ACCESS(glob11, 11);
  BAD_ACCESS(glob12, 12);
  BAD_ACCESS(glob13, 13);
  BAD_ACCESS(glob14, 14);
  BAD_ACCESS(glob15, 15);
  BAD_ACCESS(glob16, 16);
  BAD_ACCESS(glob17, 17);
  BAD_ACCESS(glob1000, 1000);
  BAD_ACCESS(glob1000, 1100);  // Redzone is at least 101 bytes.
  BAD_ACCESS(glob10000, 10000);
  BAD_ACCESS(glob10000, 11000);  // Redzone is at least 1001 bytes.
  BAD_ACCESS(glob100000, 100000);
  BAD_ACCESS(glob100000, 110000);  // Redzone is at least 10001 bytes.
}

TEST(AddressSanitizerInterface, PoisonedRegion) {
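  // 16 is assumed here to be the smallest redzone the ASan allocator places
  // after a heap allocation, so bytes in [p + size, p + size + rz) are
  // expected to be poisoned regardless of the actual redzone chosen for size.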
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        void *first_poisoned = __asan_region_is_poisoned(p + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(p + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(p + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero for the entire shadow region.
// The profile should look like this:
//     89.10% [.] __memset_sse2
//     10.50% [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume roughly SHADOW_GRANULARITY times fewer
// CPU cycles than memset itself.
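// Disabled tests are skipped by default; pass --gtest_also_run_disabled_tests
// (optionally with --gtest_filter) to run this benchmark by hand.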
TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

// Same here, but we run memset with small sizes.
TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) {
  size_t size = 32;
  char *x = new char[size];
  for (int i = 0; i < 100000000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs = 1 << 9;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__sanitizer_get_ownership(&pointers));
    EXPECT_FALSE(__sanitizer_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__sanitizer_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __sanitizer_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}

TEST(AddressSanitizerInterface, HandleNoReturnTest) {
  char array[40];
  __asan_poison_memory_region(array, sizeof(array));
  BAD_ACCESS(array, 20);
  __asan_handle_no_return();
  // __asan_handle_no_return unpoisons the whole thread stack.
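  // (It is meant to be called right before a noreturn call such as longjmp,
  // so that stale poisoning left by the frames being skipped cannot trigger
  // false positives later.)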
  GOOD_ACCESS(array, 20);
}