//===-- sanitizer_common_test.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include <algorithm>

// This ensures that including both internal sanitizer_common headers
// and the interface headers does not lead to compilation failures.
// Both may be included in unit tests, where googletest transitively
// pulls in sanitizer interface headers.
// The headers are specifically included using relative paths,
// because a compiler may otherwise pick up a different, mismatching version
// of sanitizer headers.
#include "../../../include/sanitizer/asan_interface.h"
#include "../../../include/sanitizer/msan_interface.h"
#include "../../../include/sanitizer/tsan_interface.h"
#include "gtest/gtest.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_pthread_wrappers.h"

namespace __sanitizer {

static bool IsSorted(const uptr *array, uptr n) {
  for (uptr i = 1; i < n; i++) {
    if (array[i] < array[i - 1]) return false;
  }
  return true;
}

TEST(SanitizerCommon, SortTest) {
  uptr array[100];
  uptr n = 100;
  // Already sorted.
  for (uptr i = 0; i < n; i++) {
    array[i] = i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Reverse order.
  for (uptr i = 0; i < n; i++) {
    array[i] = n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Mixed order.
  for (uptr i = 0; i < n; i++) {
    array[i] = (i % 2 == 0) ? i : n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All equal.
  for (uptr i = 0; i < n; i++) {
    array[i] = 42;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All but one sorted.
  for (uptr i = 0; i < n - 1; i++) {
    array[i] = i;
  }
  array[n - 1] = 42;
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Minimal case - sort two elements.
  array[0] = 1;
  array[1] = 0;
  Sort(array, 2);
  EXPECT_TRUE(IsSorted(array, 2));
}

TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
  uptr PageSize = GetPageSizeCached();
  for (uptr size = 1; size <= 32; size *= 2) {
    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
      for (int iter = 0; iter < 100; iter++) {
        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
        EXPECT_EQ(0U, res % (alignment * PageSize));
        internal_memset((void*)res, 1, size * PageSize);
        UnmapOrDie((void*)res, size * PageSize);
      }
    }
  }
}

TEST(SanitizerCommon, Mprotect) {
  uptr PageSize = GetPageSizeCached();
  u8 *mem = reinterpret_cast<u8 *>(MmapOrDie(PageSize, "MprotectTest"));
  for (u8 *p = mem; p < mem + PageSize; ++p) ++(*p);

  MprotectReadOnly(reinterpret_cast<uptr>(mem), PageSize);
  for (u8 *p = mem; p < mem + PageSize; ++p) EXPECT_EQ(1u, *p);
  EXPECT_DEATH(++mem[0], "");
  EXPECT_DEATH(++mem[PageSize / 2], "");
  EXPECT_DEATH(++mem[PageSize - 1], "");

  MprotectNoAccess(reinterpret_cast<uptr>(mem), PageSize);
  volatile u8 t;
  (void)t;
  EXPECT_DEATH(t = mem[0], "");
  EXPECT_DEATH(t = mem[PageSize / 2], "");
  EXPECT_DEATH(t = mem[PageSize - 1], "");
}

TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {
  InternalMmapVector<uptr> v;
  v.reserve(1);
  CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}

TEST(SanitizerCommon, InternalMmapVectorResize) {
  InternalMmapVector<uptr> v;
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), v.size());

  v.reserve(1000);
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), 1000U);

  v.resize(10000);
  CHECK_EQ(10000U, v.size());
  CHECK_GE(v.capacity(), v.size());
  uptr cap = v.capacity();

  v.resize(100);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);

  v.reserve(10);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);
}

TEST(SanitizerCommon, InternalMmapVector) {
  InternalMmapVector<uptr> vector;
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector.size());
    vector.push_back(i);
  }
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector[i]);
  }
  for (int i = 99; i >= 0; i--) {
    EXPECT_EQ((uptr)i, vector.back());
    vector.pop_back();
    EXPECT_EQ((uptr)i, vector.size());
  }
  InternalMmapVector<uptr> empty_vector;
  CHECK_EQ(empty_vector.capacity(), 0U);
  CHECK_EQ(0U, empty_vector.size());
}

TEST(SanitizerCommon, InternalMmapVectorEq) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
  }
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1.push_back(1);
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);

  vector2.push_back(1);
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1[55] = 1;
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);
}

TEST(SanitizerCommon, InternalMmapVectorSwap) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  InternalMmapVector<uptr> vector3;
  InternalMmapVector<uptr> vector4;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
    vector3.push_back(-i);
    vector4.push_back(-i);
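    // Note: -i wraps around modulo 2^N because uptr is unsigned, so
    // vector3/vector4 hold values distinct from vector1/vector2 for i > 0.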
  }
  EXPECT_NE(vector2, vector3);
  EXPECT_NE(vector1, vector4);
  vector1.swap(vector3);
  EXPECT_EQ(vector2, vector3);
  EXPECT_EQ(vector1, vector4);
}

void TestThreadInfo(bool main) {
  uptr stk_begin = 0;
  uptr stk_end = 0;
  uptr tls_begin = 0;
  uptr tls_end = 0;
  GetThreadStackAndTls(main, &stk_begin, &stk_end, &tls_begin, &tls_end);

  int stack_var;
  EXPECT_NE(stk_begin, (uptr)0);
  EXPECT_GT(stk_end, stk_begin);
  EXPECT_GT((uptr)&stack_var, stk_begin);
  EXPECT_LT((uptr)&stack_var, stk_end);

#if SANITIZER_LINUX && defined(__x86_64__)
  static __thread int thread_var;
  EXPECT_NE(tls_begin, (uptr)0);
  EXPECT_GT(tls_end, tls_begin);
  EXPECT_GT((uptr)&thread_var, tls_begin);
  EXPECT_LT((uptr)&thread_var, tls_end);

  // Ensure that tls and stack do not intersect.
  EXPECT_TRUE(tls_begin < stk_begin || tls_begin >= stk_end);
  EXPECT_TRUE(tls_end < stk_begin || tls_end >= stk_end);
  EXPECT_TRUE((tls_begin < stk_begin) == (tls_end < stk_begin));
#endif
}

static void *WorkerThread(void *arg) {
  TestThreadInfo(false);
  return 0;
}

TEST(SanitizerCommon, ThreadStackTlsMain) {
  InitializePlatformEarly();
  TestThreadInfo(true);
}

TEST(SanitizerCommon, ThreadStackTlsWorker) {
  InitializePlatformEarly();
  pthread_t t;
  PTHREAD_CREATE(&t, 0, WorkerThread, 0);
  PTHREAD_JOIN(t, 0);
}

bool UptrLess(uptr a, uptr b) {
  return a < b;
}

TEST(SanitizerCommon, InternalLowerBound) {
  std::vector<int> arr = {1, 3, 5, 7, 11};

  EXPECT_EQ(0u, InternalLowerBound(arr, 0));
  EXPECT_EQ(0u, InternalLowerBound(arr, 1));
  EXPECT_EQ(1u, InternalLowerBound(arr, 2));
  EXPECT_EQ(1u, InternalLowerBound(arr, 3));
  EXPECT_EQ(2u, InternalLowerBound(arr, 4));
  EXPECT_EQ(2u, InternalLowerBound(arr, 5));
  EXPECT_EQ(3u, InternalLowerBound(arr, 6));
  EXPECT_EQ(3u, InternalLowerBound(arr, 7));
  EXPECT_EQ(4u, InternalLowerBound(arr, 8));
  EXPECT_EQ(4u, InternalLowerBound(arr, 9));
  EXPECT_EQ(4u, InternalLowerBound(arr, 10));
  EXPECT_EQ(4u, InternalLowerBound(arr, 11));
  EXPECT_EQ(5u, InternalLowerBound(arr, 12));
}

TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
  std::vector<int> data;
  auto create_item = [] (size_t i, size_t j) {
    auto v = i * 10000 + j;
    return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
  };
  for (size_t i = 0; i < 1000; ++i) {
    data.resize(i);
    for (size_t j = 0; j < i; ++j) {
      data[j] = create_item(i, j);
    }

    std::sort(data.begin(), data.end());

    for (size_t j = 0; j < i; ++j) {
      int val = create_item(i, j);
      for (auto to_find : {val - 1, val, val + 1}) {
        uptr expected =
            std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
        EXPECT_EQ(expected,
                  InternalLowerBound(data, to_find, std::less<int>()));
      }
    }
  }
}

class SortAndDedupTest : public ::testing::TestWithParam<std::vector<int>> {};

TEST_P(SortAndDedupTest, SortAndDedup) {
  std::vector<int> v_std = GetParam();
  std::sort(v_std.begin(), v_std.end());
  v_std.erase(std::unique(v_std.begin(), v_std.end()), v_std.end());

  std::vector<int> v = GetParam();
  SortAndDedup(v);

  EXPECT_EQ(v_std, v);
}

const std::vector<int> kSortAndDedupTests[] = {
    {},
    {1},
    {1, 1},
    {1, 1, 1},
    {1, 2, 3},
    {3, 2, 1},
    {1, 2, 2, 3},
    {3, 3, 2, 1, 2},
    {3, 3, 2, 1, 2},
    {1, 2, 1, 1, 2, 1, 1, 1, 2, 2},
    {1, 3, 3, 2, 3, 1, 3, 1, 4, 4, 2, 1, 4, 1, 1, 2, 2},
};
INSTANTIATE_TEST_SUITE_P(SortAndDedupTest, SortAndDedupTest,
                         ::testing::ValuesIn(kSortAndDedupTests));

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TEST(SanitizerCommon, FindPathToBinary) {
  char *true_path = FindPathToBinary("true");
  EXPECT_NE((char*)0, internal_strstr(true_path, "/bin/true"));
  InternalFree(true_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#elif SANITIZER_WINDOWS
TEST(SanitizerCommon, FindPathToBinary) {
  // ntdll.dll should be on PATH in all supported test environments on all
  // supported Windows versions.
  char *ntdll_path = FindPathToBinary("ntdll.dll");
  EXPECT_NE((char*)0, internal_strstr(ntdll_path, "ntdll.dll"));
  InternalFree(ntdll_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#endif

TEST(SanitizerCommon, StripPathPrefix) {
  EXPECT_EQ(0, StripPathPrefix(0, "prefix"));
  EXPECT_STREQ("foo", StripPathPrefix("foo", 0));
  EXPECT_STREQ("dir/file.cc",
               StripPathPrefix("/usr/lib/dir/file.cc", "/usr/lib/"));
  EXPECT_STREQ("/file.cc", StripPathPrefix("/usr/myroot/file.cc", "/myroot"));
  EXPECT_STREQ("file.h", StripPathPrefix("/usr/lib/./file.h", "/usr/lib/"));
}

TEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) {
  RemoveANSIEscapeSequencesFromString(nullptr);
  const char *buffs[22] = {
    "Default", "Default",
    "\033[95mLight magenta", "Light magenta",
    "\033[30mBlack\033[32mGreen\033[90mGray", "BlackGreenGray",
    "\033[106mLight cyan \033[107mWhite ", "Light cyan White ",
    "\033[31mHello\033[0m World", "Hello World",
    "\033[38;5;82mHello \033[38;5;198mWorld", "Hello World",
    "123[653456789012", "123[653456789012",
    "Normal \033[5mBlink \033[25mNormal", "Normal Blink Normal",
    "\033[106m\033[107m", "",
    "", "",
    " ", " ",
  };

  for (size_t i = 0; i < ARRAY_SIZE(buffs); i += 2) {
    char *buffer_copy = internal_strdup(buffs[i]);
    RemoveANSIEscapeSequencesFromString(buffer_copy);
    EXPECT_STREQ(buffer_copy, buffs[i + 1]);
    InternalFree(buffer_copy);
  }
}

TEST(SanitizerCommon, InternalScopedStringAppend) {
  InternalScopedString str;
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.Append("");
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.Append("foo");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  str.Append("");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  str.Append("123\000456");
  EXPECT_EQ(6U, str.length());
  EXPECT_STREQ("foo123", str.data());
}

TEST(SanitizerCommon, InternalScopedStringAppendF) {
  InternalScopedString str;
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.AppendF("foo");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  int x = 1234;
  str.AppendF("%d", x);
  EXPECT_EQ(7U, str.length());
  EXPECT_STREQ("foo1234", str.data());

  str.AppendF("%d", x);
  EXPECT_EQ(11U, str.length());
  EXPECT_STREQ("foo12341234", str.data());

  str.clear();
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());
}

TEST(SanitizerCommon, InternalScopedStringLarge) {
  InternalScopedString str;
  std::string expected;
  for (int i = 0; i < 1000; ++i) {
    std::string append(i, 'a' + i % 26);
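    // Mirror the expected contents in a std::string and compare after every
    // append; the growing payload forces repeated reallocation of the
    // internal buffer.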
    expected += append;
    str.Append(append.c_str());
    EXPECT_EQ(expected, str.data());
  }
}

TEST(SanitizerCommon, InternalScopedStringLargeFormat) {
  InternalScopedString str;
  std::string expected;
  for (int i = 0; i < 1000; ++i) {
    std::string append(i, 'a' + i % 26);
    expected += append;
    str.AppendF("%s", append.c_str());
    EXPECT_EQ(expected, str.data());
  }
}

#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_IOS
TEST(SanitizerCommon, GetRandom) {
  u8 buffer_1[32], buffer_2[32];
  for (bool blocking : { false, true }) {
    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));
    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
      for (uptr i = 0; i < 100; i++) {
        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));
        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));
        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
      }
    }
  }
}
#endif

TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);
  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);

  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed"
}

TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);

  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));

  // Valid mappings should be readable.
  unsigned char buffer[init_size];
  memcpy(buffer, reinterpret_cast<void *>(res), init_size);

  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}

TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 8;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Unmapping the entire range should succeed.
  address_range.Unmap(base_addr, init_size);

  // Map a new range.
  base_addr = address_range.Init(init_size);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Windows doesn't allow partial unmappings.
#if !SANITIZER_WINDOWS

  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);

  // Unmapping at the end should succeed.
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);

#endif

  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + (PageSize * 2), PageSize), ".*");
}

TEST(SanitizerCommon, ReadBinaryNameCached) {
  char buf[256];
  EXPECT_NE((uptr)0, ReadBinaryNameCached(buf, sizeof(buf)));
}

}  // namespace __sanitizer