1 //===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// 9 /// \file 10 /// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and 11 /// FreeBSD-specific code. 12 /// 13 //===----------------------------------------------------------------------===// 14 15 #include "sanitizer_common/sanitizer_platform.h" 16 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD 17 18 #include "hwasan.h" 19 #include "hwasan_dynamic_shadow.h" 20 #include "hwasan_interface_internal.h" 21 #include "hwasan_mapping.h" 22 #include "hwasan_report.h" 23 #include "hwasan_thread.h" 24 #include "hwasan_thread_list.h" 25 26 #include <dlfcn.h> 27 #include <elf.h> 28 #include <link.h> 29 #include <pthread.h> 30 #include <signal.h> 31 #include <stdio.h> 32 #include <stdlib.h> 33 #include <sys/resource.h> 34 #include <sys/time.h> 35 #include <unistd.h> 36 #include <unwind.h> 37 38 #include "sanitizer_common/sanitizer_common.h" 39 #include "sanitizer_common/sanitizer_procmaps.h" 40 41 // Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID. 42 // 43 // HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF 44 // Not currently tested. 45 // HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON 46 // Integration tests downstream exist. 47 // HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF 48 // Tested with check-hwasan on x86_64-linux. 49 // HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON 50 // Tested with check-hwasan on aarch64-linux-android. 51 #if !SANITIZER_ANDROID 52 SANITIZER_INTERFACE_ATTRIBUTE 53 THREADLOCAL uptr __hwasan_tls; 54 #endif 55 56 namespace __hwasan { 57 58 static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { 59 CHECK_EQ((beg % GetMmapGranularity()), 0); 60 CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); 61 uptr size = end - beg + 1; 62 DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. 63 if (!MmapFixedNoReserve(beg, size, name)) { 64 Report( 65 "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " 66 "Perhaps you're using ulimit -v\n", 67 size); 68 Abort(); 69 } 70 } 71 72 static void ProtectGap(uptr addr, uptr size) { 73 if (!size) 74 return; 75 void *res = MmapFixedNoAccess(addr, size, "shadow gap"); 76 if (addr == (uptr)res) 77 return; 78 // A few pages at the start of the address space can not be protected. 79 // But we really want to protect as much as possible, to prevent this memory 80 // being returned as a result of a non-FIXED mmap(). 81 if (addr == 0) { 82 uptr step = GetMmapGranularity(); 83 while (size > step) { 84 addr += step; 85 size -= step; 86 void *res = MmapFixedNoAccess(addr, size, "shadow gap"); 87 if (addr == (uptr)res) 88 return; 89 } 90 } 91 92 Report( 93 "ERROR: Failed to protect shadow gap [%p, %p]. " 94 "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr, 95 (void *)(addr + size)); 96 DumpProcessMap(); 97 Die(); 98 } 99 100 static uptr kLowMemStart; 101 static uptr kLowMemEnd; 102 static uptr kLowShadowEnd; 103 static uptr kLowShadowStart; 104 static uptr kHighShadowStart; 105 static uptr kHighShadowEnd; 106 static uptr kHighMemStart; 107 static uptr kHighMemEnd; 108 109 static void PrintRange(uptr start, uptr end, const char *name) { 110 Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name); 111 } 112 113 static void PrintAddressSpaceLayout() { 114 PrintRange(kHighMemStart, kHighMemEnd, "HighMem"); 115 if (kHighShadowEnd + 1 < kHighMemStart) 116 PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap"); 117 else 118 CHECK_EQ(kHighShadowEnd + 1, kHighMemStart); 119 PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow"); 120 if (kLowShadowEnd + 1 < kHighShadowStart) 121 PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap"); 122 else 123 CHECK_EQ(kLowMemEnd + 1, kHighShadowStart); 124 PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow"); 125 if (kLowMemEnd + 1 < kLowShadowStart) 126 PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap"); 127 else 128 CHECK_EQ(kLowMemEnd + 1, kLowShadowStart); 129 PrintRange(kLowMemStart, kLowMemEnd, "LowMem"); 130 CHECK_EQ(0, kLowMemStart); 131 } 132 133 static uptr GetHighMemEnd() { 134 // HighMem covers the upper part of the address space. 135 uptr max_address = GetMaxUserVirtualAddress(); 136 // Adjust max address to make sure that kHighMemEnd and kHighMemStart are 137 // properly aligned: 138 max_address |= (GetMmapGranularity() << kShadowScale) - 1; 139 return max_address; 140 } 141 142 static void InitializeShadowBaseAddress(uptr shadow_size_bytes) { 143 __hwasan_shadow_memory_dynamic_address = 144 FindDynamicShadowStart(shadow_size_bytes); 145 } 146 147 bool InitShadow() { 148 // Define the entire memory range. 149 kHighMemEnd = GetHighMemEnd(); 150 151 // Determine shadow memory base offset. 152 InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd)); 153 154 // Place the low memory first. 155 kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1; 156 kLowMemStart = 0; 157 158 // Define the low shadow based on the already placed low memory. 159 kLowShadowEnd = MemToShadow(kLowMemEnd); 160 kLowShadowStart = __hwasan_shadow_memory_dynamic_address; 161 162 // High shadow takes whatever memory is left up there (making sure it is not 163 // interfering with low memory in the fixed case). 164 kHighShadowEnd = MemToShadow(kHighMemEnd); 165 kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1; 166 167 // High memory starts where allocated shadow allows. 168 kHighMemStart = ShadowToMem(kHighShadowStart); 169 170 // Check the sanity of the defined memory ranges (there might be gaps). 171 CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0); 172 CHECK_GT(kHighMemStart, kHighShadowEnd); 173 CHECK_GT(kHighShadowEnd, kHighShadowStart); 174 CHECK_GT(kHighShadowStart, kLowMemEnd); 175 CHECK_GT(kLowMemEnd, kLowMemStart); 176 CHECK_GT(kLowShadowEnd, kLowShadowStart); 177 CHECK_GT(kLowShadowStart, kLowMemEnd); 178 179 if (Verbosity()) 180 PrintAddressSpaceLayout(); 181 182 // Reserve shadow memory. 183 ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow"); 184 ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow"); 185 186 // Protect all the gaps. 187 ProtectGap(0, Min(kLowMemStart, kLowShadowStart)); 188 if (kLowMemEnd + 1 < kLowShadowStart) 189 ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1); 190 if (kLowShadowEnd + 1 < kHighShadowStart) 191 ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1); 192 if (kHighShadowEnd + 1 < kHighMemStart) 193 ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1); 194 195 return true; 196 } 197 198 void InitThreads() { 199 CHECK(__hwasan_shadow_memory_dynamic_address); 200 uptr guard_page_size = GetMmapGranularity(); 201 uptr thread_space_start = 202 __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment); 203 uptr thread_space_end = 204 __hwasan_shadow_memory_dynamic_address - guard_page_size; 205 ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1, 206 "hwasan threads"); 207 ProtectGap(thread_space_end, 208 __hwasan_shadow_memory_dynamic_address - thread_space_end); 209 InitThreadList(thread_space_start, thread_space_end - thread_space_start); 210 } 211 212 static void MadviseShadowRegion(uptr beg, uptr end) { 213 uptr size = end - beg + 1; 214 if (common_flags()->no_huge_pages_for_shadow) 215 NoHugePagesInRegion(beg, size); 216 if (common_flags()->use_madv_dontdump) 217 DontDumpShadowMemory(beg, size); 218 } 219 220 void MadviseShadow() { 221 MadviseShadowRegion(kLowShadowStart, kLowShadowEnd); 222 MadviseShadowRegion(kHighShadowStart, kHighShadowEnd); 223 } 224 225 bool MemIsApp(uptr p) { 226 CHECK(GetTagFromPointer(p) == 0); 227 return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd); 228 } 229 230 static void HwasanAtExit(void) { 231 if (common_flags()->print_module_map) 232 DumpProcessMap(); 233 if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0)) 234 ReportStats(); 235 if (hwasan_report_count > 0) { 236 // ReportAtExitStatistics(); 237 if (common_flags()->exitcode) 238 internal__exit(common_flags()->exitcode); 239 } 240 } 241 242 void InstallAtExitHandler() { 243 atexit(HwasanAtExit); 244 } 245 246 // ---------------------- TSD ---------------- {{{1 247 248 extern "C" void __hwasan_thread_enter() { 249 hwasanThreadList().CreateCurrentThread()->InitRandomState(); 250 } 251 252 extern "C" void __hwasan_thread_exit() { 253 Thread *t = GetCurrentThread(); 254 // Make sure that signal handler can not see a stale current thread pointer. 255 atomic_signal_fence(memory_order_seq_cst); 256 if (t) 257 hwasanThreadList().ReleaseThread(t); 258 } 259 260 #if HWASAN_WITH_INTERCEPTORS 261 static pthread_key_t tsd_key; 262 static bool tsd_key_inited = false; 263 264 void HwasanTSDThreadInit() { 265 if (tsd_key_inited) 266 CHECK_EQ(0, pthread_setspecific(tsd_key, 267 (void *)GetPthreadDestructorIterations())); 268 } 269 270 void HwasanTSDDtor(void *tsd) { 271 uptr iterations = (uptr)tsd; 272 if (iterations > 1) { 273 CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1))); 274 return; 275 } 276 __hwasan_thread_exit(); 277 } 278 279 void HwasanTSDInit() { 280 CHECK(!tsd_key_inited); 281 tsd_key_inited = true; 282 CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor)); 283 } 284 #else 285 void HwasanTSDInit() {} 286 void HwasanTSDThreadInit() {} 287 #endif 288 289 #if SANITIZER_ANDROID 290 uptr *GetCurrentThreadLongPtr() { 291 return (uptr *)get_android_tls_ptr(); 292 } 293 #else 294 uptr *GetCurrentThreadLongPtr() { 295 return &__hwasan_tls; 296 } 297 #endif 298 299 #if SANITIZER_ANDROID 300 void AndroidTestTlsSlot() { 301 uptr kMagicValue = 0x010203040A0B0C0D; 302 uptr *tls_ptr = GetCurrentThreadLongPtr(); 303 uptr old_value = *tls_ptr; 304 *tls_ptr = kMagicValue; 305 dlerror(); 306 if (*(uptr *)get_android_tls_ptr() != kMagicValue) { 307 Printf( 308 "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used " 309 "for dlerror().\n"); 310 Die(); 311 } 312 *tls_ptr = old_value; 313 } 314 #else 315 void AndroidTestTlsSlot() {} 316 #endif 317 318 Thread *GetCurrentThread() { 319 uptr *ThreadLong = GetCurrentThreadLongPtr(); 320 #if HWASAN_WITH_INTERCEPTORS 321 if (!*ThreadLong) 322 __hwasan_thread_enter(); 323 #endif 324 auto *R = (StackAllocationsRingBuffer *)ThreadLong; 325 return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next())); 326 } 327 328 struct AccessInfo { 329 uptr addr; 330 uptr size; 331 bool is_store; 332 bool is_load; 333 bool recover; 334 }; 335 336 static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) { 337 // Access type is passed in a platform dependent way (see below) and encoded 338 // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is 339 // recoverable. Valid values of Y are 0 to 4, which are interpreted as 340 // log2(access_size), and 0xF, which means that access size is passed via 341 // platform dependent register (see below). 342 #if defined(__aarch64__) 343 // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF, 344 // access size is stored in X1 register. Access address is always in X0 345 // register. 346 uptr pc = (uptr)info->si_addr; 347 const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff; 348 if ((code & 0xff00) != 0x900) 349 return AccessInfo{}; // Not ours. 350 351 const bool is_store = code & 0x10; 352 const bool recover = code & 0x20; 353 const uptr addr = uc->uc_mcontext.regs[0]; 354 const unsigned size_log = code & 0xf; 355 if (size_log > 4 && size_log != 0xf) 356 return AccessInfo{}; // Not ours. 357 const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log; 358 359 #elif defined(__x86_64__) 360 // Access type is encoded in the instruction following INT3 as 361 // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in 362 // RSI register. Access address is always in RDI register. 363 uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP]; 364 uint8_t *nop = (uint8_t*)pc; 365 if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 || 366 *(nop + 3) < 0x40) 367 return AccessInfo{}; // Not ours. 368 const unsigned code = *(nop + 3); 369 370 const bool is_store = code & 0x10; 371 const bool recover = code & 0x20; 372 const uptr addr = uc->uc_mcontext.gregs[REG_RDI]; 373 const unsigned size_log = code & 0xf; 374 if (size_log > 4 && size_log != 0xf) 375 return AccessInfo{}; // Not ours. 376 const uptr size = 377 size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log; 378 379 #else 380 # error Unsupported architecture 381 #endif 382 383 return AccessInfo{addr, size, is_store, !is_store, recover}; 384 } 385 386 static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, 387 ucontext_t *uc, uptr *registers_frame = nullptr) { 388 InternalMmapVector<BufferedStackTrace> stack_buffer(1); 389 BufferedStackTrace *stack = stack_buffer.data(); 390 stack->Reset(); 391 stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal); 392 393 // The second stack frame contains the failure __hwasan_check function, as 394 // we have a stack frame for the registers saved in __hwasan_tag_mismatch that 395 // we wish to ignore. This (currently) only occurs on AArch64, as x64 396 // implementations use SIGTRAP to implement the failure, and thus do not go 397 // through the stack saver. 398 if (registers_frame && stack->trace && stack->size > 0) { 399 stack->trace++; 400 stack->size--; 401 } 402 403 bool fatal = flags()->halt_on_error || !ai.recover; 404 ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal, 405 registers_frame); 406 } 407 408 static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) { 409 AccessInfo ai = GetAccessInfo(info, uc); 410 if (!ai.is_store && !ai.is_load) 411 return false; 412 413 SignalContext sig{info, uc}; 414 HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc); 415 416 #if defined(__aarch64__) 417 uc->uc_mcontext.pc += 4; 418 #elif defined(__x86_64__) 419 #else 420 # error Unsupported architecture 421 #endif 422 return true; 423 } 424 425 // Entry point stub for interoperability between __hwasan_tag_mismatch (ASM) and 426 // the rest of the mismatch handling code (C++). 427 extern "C" void __hwasan_tag_mismatch_stub(uptr addr, uptr access_info, 428 uptr *registers_frame) { 429 AccessInfo ai; 430 ai.is_store = access_info & 0x10; 431 ai.recover = false; 432 ai.addr = addr; 433 ai.size = 1 << (access_info & 0xf); 434 435 HandleTagMismatch(ai, (uptr)__builtin_return_address(0), 436 (uptr)__builtin_frame_address(0), nullptr, registers_frame); 437 __builtin_unreachable(); 438 } 439 440 static void OnStackUnwind(const SignalContext &sig, const void *, 441 BufferedStackTrace *stack) { 442 stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context, 443 common_flags()->fast_unwind_on_fatal); 444 } 445 446 void HwasanOnDeadlySignal(int signo, void *info, void *context) { 447 // Probably a tag mismatch. 448 if (signo == SIGTRAP) 449 if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context)) 450 return; 451 452 HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr); 453 } 454 455 456 } // namespace __hwasan 457 458 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD 459