//===-- sanitizer_common_test.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <cstring>
#include <functional>
#include <string>
#include <vector>

// This ensures that including both internal sanitizer_common headers
// and the interface headers does not lead to compilation failures.
// Both may be included in unit tests, where googletest transitively
// pulls in sanitizer interface headers.
// The headers are specifically included using relative paths,
// because a compiler may use a different mismatching version
// of sanitizer headers.
#include "../../../include/sanitizer/asan_interface.h"
#include "../../../include/sanitizer/msan_interface.h"
#include "../../../include/sanitizer/tsan_interface.h"
#include "gtest/gtest.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_pthread_wrappers.h"

namespace __sanitizer {

static bool IsSorted(const uptr *array, uptr n) {
  for (uptr i = 1; i < n; i++) {
    if (array[i] < array[i - 1]) return false;
  }
  return true;
}

TEST(SanitizerCommon, SortTest) {
  uptr array[100];
  uptr n = 100;
  // Already sorted.
  for (uptr i = 0; i < n; i++) {
    array[i] = i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Reverse order.
  for (uptr i = 0; i < n; i++) {
    array[i] = n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Mixed order.
  for (uptr i = 0; i < n; i++) {
    array[i] = (i % 2 == 0) ? i : n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All equal.
  for (uptr i = 0; i < n; i++) {
    array[i] = 42;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All but one sorted.
  for (uptr i = 0; i < n - 1; i++) {
    array[i] = i;
  }
  array[n - 1] = 42;
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Minimal case - sort two elements.
  array[0] = 1;
  array[1] = 0;
  Sort(array, 2);
  EXPECT_TRUE(IsSorted(array, 2));
}
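
// MmapAlignedOrDieOnFatalError should return an address aligned to the
// requested multiple of the page size, and the returned mapping should be
// writable and safe to unmap.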
TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
  uptr PageSize = GetPageSizeCached();
  for (uptr size = 1; size <= 32; size *= 2) {
    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
      for (int iter = 0; iter < 100; iter++) {
        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
        EXPECT_EQ(0U, res % (alignment * PageSize));
        internal_memset((void*)res, 1, size * PageSize);
        UnmapOrDie((void*)res, size * PageSize);
      }
    }
  }
}

TEST(SanitizerCommon, Mprotect) {
  uptr PageSize = GetPageSizeCached();
  u8 *mem = reinterpret_cast<u8 *>(MmapOrDie(PageSize, "MprotectTest"));
  for (u8 *p = mem; p < mem + PageSize; ++p) ++(*p);

  MprotectReadOnly(reinterpret_cast<uptr>(mem), PageSize);
  for (u8 *p = mem; p < mem + PageSize; ++p) EXPECT_EQ(1u, *p);
  EXPECT_DEATH(++mem[0], "");
  EXPECT_DEATH(++mem[PageSize / 2], "");
  EXPECT_DEATH(++mem[PageSize - 1], "");

  MprotectNoAccess(reinterpret_cast<uptr>(mem), PageSize);
  volatile u8 t;
  (void)t;
  EXPECT_DEATH(t = mem[0], "");
  EXPECT_DEATH(t = mem[PageSize / 2], "");
  EXPECT_DEATH(t = mem[PageSize - 1], "");
}

TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {
  InternalMmapVector<uptr> v;
  v.reserve(1);
  CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}

TEST(SanitizerCommon, InternalMmapVectorResize) {
  InternalMmapVector<uptr> v;
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), v.size());

  v.reserve(1000);
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), 1000U);

  v.resize(10000);
  CHECK_EQ(10000U, v.size());
  CHECK_GE(v.capacity(), v.size());
  uptr cap = v.capacity();

  v.resize(100);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);

  v.reserve(10);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);
}

TEST(SanitizerCommon, InternalMmapVector) {
  InternalMmapVector<uptr> vector;
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector.size());
    vector.push_back(i);
  }
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector[i]);
  }
  for (int i = 99; i >= 0; i--) {
    EXPECT_EQ((uptr)i, vector.back());
    vector.pop_back();
    EXPECT_EQ((uptr)i, vector.size());
  }
  InternalMmapVector<uptr> empty_vector;
  CHECK_EQ(empty_vector.capacity(), 0U);
  CHECK_EQ(0U, empty_vector.size());
}

TEST(SanitizerCommon, InternalMmapVectorEq) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
  }
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1.push_back(1);
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);

  vector2.push_back(1);
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1[55] = 1;
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);
}

TEST(SanitizerCommon, InternalMmapVectorSwap) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  InternalMmapVector<uptr> vector3;
  InternalMmapVector<uptr> vector4;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
    vector3.push_back(-i);
    vector4.push_back(-i);
  }
  EXPECT_NE(vector2, vector3);
  EXPECT_NE(vector1, vector4);
  vector1.swap(vector3);
  EXPECT_EQ(vector2, vector3);
  EXPECT_EQ(vector1, vector4);
}
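
// Shared helper for the ThreadStackTls tests: GetThreadStackAndTls should
// report a stack range that contains a local variable and, on Linux/x86_64,
// a TLS range that contains a thread-local variable and does not overlap the
// stack.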
void TestThreadInfo(bool main) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);

  int stack_var;
  EXPECT_NE(stk_addr, (uptr)0);
  EXPECT_NE(stk_size, (uptr)0);
  EXPECT_GT((uptr)&stack_var, stk_addr);
  EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);

#if SANITIZER_LINUX && defined(__x86_64__)
  static __thread int thread_var;
  EXPECT_NE(tls_addr, (uptr)0);
  EXPECT_NE(tls_size, (uptr)0);
  EXPECT_GT((uptr)&thread_var, tls_addr);
  EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);

  // Ensure that tls and stack do not intersect.
  uptr tls_end = tls_addr + tls_size;
  EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
  EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size);
  EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr));
#endif
}

static void *WorkerThread(void *arg) {
  TestThreadInfo(false);
  return 0;
}

TEST(SanitizerCommon, ThreadStackTlsMain) {
  InitTlsSize();
  TestThreadInfo(true);
}

TEST(SanitizerCommon, ThreadStackTlsWorker) {
  InitTlsSize();
  pthread_t t;
  PTHREAD_CREATE(&t, 0, WorkerThread, 0);
  PTHREAD_JOIN(t, 0);
}

bool UptrLess(uptr a, uptr b) {
  return a < b;
}

TEST(SanitizerCommon, InternalLowerBound) {
  std::vector<int> arr = {1, 3, 5, 7, 11};

  EXPECT_EQ(0u, InternalLowerBound(arr, 0));
  EXPECT_EQ(0u, InternalLowerBound(arr, 1));
  EXPECT_EQ(1u, InternalLowerBound(arr, 2));
  EXPECT_EQ(1u, InternalLowerBound(arr, 3));
  EXPECT_EQ(2u, InternalLowerBound(arr, 4));
  EXPECT_EQ(2u, InternalLowerBound(arr, 5));
  EXPECT_EQ(3u, InternalLowerBound(arr, 6));
  EXPECT_EQ(3u, InternalLowerBound(arr, 7));
  EXPECT_EQ(4u, InternalLowerBound(arr, 8));
  EXPECT_EQ(4u, InternalLowerBound(arr, 9));
  EXPECT_EQ(4u, InternalLowerBound(arr, 10));
  EXPECT_EQ(4u, InternalLowerBound(arr, 11));
  EXPECT_EQ(5u, InternalLowerBound(arr, 12));
}

TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
  std::vector<int> data;
  auto create_item = [] (size_t i, size_t j) {
    auto v = i * 10000 + j;
    return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
  };
  for (size_t i = 0; i < 1000; ++i) {
    data.resize(i);
    for (size_t j = 0; j < i; ++j) {
      data[j] = create_item(i, j);
    }

    std::sort(data.begin(), data.end());

    for (size_t j = 0; j < i; ++j) {
      int val = create_item(i, j);
      for (auto to_find : {val - 1, val, val + 1}) {
        uptr expected =
            std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
        EXPECT_EQ(expected,
                  InternalLowerBound(data, to_find, std::less<int>()));
      }
    }
  }
}

class SortAndDedupTest : public ::testing::TestWithParam<std::vector<int>> {};

TEST_P(SortAndDedupTest, SortAndDedup) {
  std::vector<int> v_std = GetParam();
  std::sort(v_std.begin(), v_std.end());
  v_std.erase(std::unique(v_std.begin(), v_std.end()), v_std.end());

  std::vector<int> v = GetParam();
  SortAndDedup(v);

  EXPECT_EQ(v_std, v);
}

const std::vector<int> kSortAndDedupTests[] = {
    {},
    {1},
    {1, 1},
    {1, 1, 1},
    {1, 2, 3},
    {3, 2, 1},
    {1, 2, 2, 3},
    {3, 3, 2, 1, 2},
    {3, 3, 2, 1, 2},
    {1, 2, 1, 1, 2, 1, 1, 1, 2, 2},
    {1, 3, 3, 2, 3, 1, 3, 1, 4, 4, 2, 1, 4, 1, 1, 2, 2},
};
INSTANTIATE_TEST_SUITE_P(SortAndDedupTest, SortAndDedupTest,
                         ::testing::ValuesIn(kSortAndDedupTests));

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TEST(SanitizerCommon, FindPathToBinary) {
  char *true_path = FindPathToBinary("true");
  EXPECT_NE((char*)0, internal_strstr(true_path, "/bin/true"));
  InternalFree(true_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#elif SANITIZER_WINDOWS
TEST(SanitizerCommon, FindPathToBinary) {
  // ntdll.dll should be on PATH in all supported test environments on all
  // supported Windows versions.
  char *ntdll_path = FindPathToBinary("ntdll.dll");
  EXPECT_NE((char*)0, internal_strstr(ntdll_path, "ntdll.dll"));
  InternalFree(ntdll_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#endif

TEST(SanitizerCommon, StripPathPrefix) {
  EXPECT_EQ(0, StripPathPrefix(0, "prefix"));
  EXPECT_STREQ("foo", StripPathPrefix("foo", 0));
  EXPECT_STREQ("dir/file.cc",
               StripPathPrefix("/usr/lib/dir/file.cc", "/usr/lib/"));
  EXPECT_STREQ("/file.cc", StripPathPrefix("/usr/myroot/file.cc", "/myroot"));
  EXPECT_STREQ("file.h", StripPathPrefix("/usr/lib/./file.h", "/usr/lib/"));
}

TEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) {
  RemoveANSIEscapeSequencesFromString(nullptr);
  const char *buffs[22] = {
    "Default",                                "Default",
    "\033[95mLight magenta",                  "Light magenta",
    "\033[30mBlack\033[32mGreen\033[90mGray", "BlackGreenGray",
    "\033[106mLight cyan \033[107mWhite ",    "Light cyan White ",
    "\033[31mHello\033[0m World",             "Hello World",
    "\033[38;5;82mHello \033[38;5;198mWorld", "Hello World",
    "123[653456789012",                       "123[653456789012",
    "Normal \033[5mBlink \033[25mNormal",     "Normal Blink Normal",
    "\033[106m\033[107m",                     "",
    "",                                       "",
    " ",                                      " ",
  };

  for (size_t i = 0; i < ARRAY_SIZE(buffs); i+=2) {
    char *buffer_copy = internal_strdup(buffs[i]);
    RemoveANSIEscapeSequencesFromString(buffer_copy);
    EXPECT_STREQ(buffer_copy, buffs[i+1]);
    InternalFree(buffer_copy);
  }
}

TEST(SanitizerCommon, InternalScopedString) {
  InternalScopedString str;
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.append("foo");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  int x = 1234;
  str.append("%d", x);
  EXPECT_EQ(7U, str.length());
  EXPECT_STREQ("foo1234", str.data());

  str.append("%d", x);
  EXPECT_EQ(11U, str.length());
  EXPECT_STREQ("foo12341234", str.data());

  str.clear();
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());
}

TEST(SanitizerCommon, InternalScopedStringLarge) {
  InternalScopedString str;
  std::string expected;
  for (int i = 0; i < 1000; ++i) {
    std::string append(i, 'a' + i % 26);
    expected += append;
    str.append("%s", append.c_str());
    EXPECT_EQ(expected, str.data());
  }
}

TEST(SanitizerCommon, InternalScopedStringLargeFormat) {
  InternalScopedString str;
  std::string expected;
  for (int i = 0; i < 1000; ++i) {
    std::string append(i, 'a' + i % 26);
    expected += append;
    str.append("%s", append.c_str());
    EXPECT_EQ(expected, str.data());
  }
}

#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_IOS
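// GetRandom should reject a null buffer, a zero-length request, and an
// oversized request; two successful reads of the same size are expected to
// return different bytes.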
TEST(SanitizerCommon, GetRandom) {
  u8 buffer_1[32], buffer_2[32];
  for (bool blocking : { false, true }) {
    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));
    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
      for (uptr i = 0; i < 100; i++) {
        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));
        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));
        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
      }
    }
  }
}
#endif

TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);
  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);

  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed"
}

TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*) -1);

  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));

  // Valid mappings should be readable.
  unsigned char buffer[init_size];
  memcpy(buffer, reinterpret_cast<void *>(res), init_size);

  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}

TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 8;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Unmapping the entire range should succeed.
  address_range.Unmap(base_addr, init_size);

  // Map a new range.
  base_addr = address_range.Init(init_size);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Windows doesn't allow partial unmappings.
#if !SANITIZER_WINDOWS

  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);

  // Unmapping at the end should succeed.
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);

#endif

  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + (PageSize * 2), PageSize), ".*");
}

TEST(SanitizerCommon, ReadBinaryNameCached) {
  char buf[256];
  EXPECT_NE((uptr)0, ReadBinaryNameCached(buf, sizeof(buf)));
}

}  // namespace __sanitizer