//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

# include <dlfcn.h>
# include <elf.h>
# include <errno.h>
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/prctl.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <unistd.h>
# include <unwind.h>

# include "hwasan.h"
# include "hwasan_dynamic_shadow.h"
# include "hwasan_interface_internal.h"
# include "hwasan_mapping.h"
# include "hwasan_report.h"
# include "hwasan_thread.h"
# include "hwasan_thread_list.h"
# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
# if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
# endif

namespace __hwasan {

// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}
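
// A worked example of the alignment math above (a sketch, assuming 4 KiB mmap
// granularity and the default kShadowScale of 4): (4096 << 4) - 1 == 0xffff,
// so the OR makes max_address end in ...ffff. kHighMemEnd + 1 is then 64 KiB
// aligned, and its shadow offset (>> kShadowScale, i.e. divided by 16) is
// 4 KiB aligned, i.e. page aligned.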

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
# define ARCH_GET_MAX_TAG_BITS 0x4003

static bool CanUseTaggingAbi() {
# if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
# else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
# endif // __x86_64__
}

static bool EnableTaggingAbi() {
# if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
# else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
# endif // __x86_64__
}
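
// An illustrative instance of the overlap check above (a sketch; the concrete
// values depend on how kTagBits and kAddressTagShift are configured for the
// target): with 6 tag bits at bit 57 (57-bit LAM), the kernel would report an
// untag mask of ~(0x3full << 57), kAddressTagMask would be 0x3full << 57, and
// (mask & kAddressTagMask) == 0 as required. A kernel that left LAM disabled
// reports mask == ~0ull, which overlaps the tag bits and fails the check.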

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If we are targeting Android and the prctl is not around, we assume this
    // is the case.
    return;
# else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
  }

  if (EnableTaggingAbi())
    return;

# if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
# else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
# endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}
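
// The resulting picture, bottom to top (a sketch; the shadow base is chosen
// dynamically, so concrete addresses vary between runs, and any gaps between
// neighbouring ranges are mapped PROT_NONE by ProtectGap()):
//
//   [kLowMemStart,     kLowMemEnd]      LowMem     (application memory)
//   [kLowShadowStart,  kLowShadowEnd]   LowShadow  (shadow of LowMem)
//   [kHighShadowStart, kHighShadowEnd]  HighShadow (shadow of HighMem)
//   [kHighMemStart,    kHighMemEnd]     HighMem    (application memory)
//
// This matches what PrintAddressSpaceLayout() reports at high verbosity.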

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}
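
// Illustration (a sketch, assuming the default kShadowBaseAlignment of 32 and
// a 4 KiB mmap granularity): the region [shadow_base - 4 GiB, shadow_base -
// 4 KiB) holds the per-thread Thread structures, and the final page below
// shadow_base is a PROT_NONE guard, so an overrun of the thread ring faults
// instead of silently corrupting the shadow.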

bool MemIsApp(uptr p) {
  // Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
  CHECK(GetTagFromPointer(p) == 0);
# endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

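// The TSD value stores the remaining destructor iterations rather than a
// pointer. For example (a sketch, assuming GetPthreadDestructorIterations()
// returns 4): when the thread exits, pthread runs HwasanTSDDtor with
// tsd == 4, which re-arms the key with 3, and so on; only when the count
// reaches 1 is the thread actually released via __hwasan_thread_exit(). This
// keeps the hwasan thread state alive while other TSD destructors still run.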
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
# endif

# if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
# else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
# endif

# if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
# else
void AndroidTestTlsSlot() {}
# endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is set if the error
  // is recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
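  //
  // For example (illustrative decodings of this scheme): 0x13 encodes a
  // non-recoverable 8-byte store (X == 1, Y == 3, size 1 << 3), while 0x2F
  // encodes a recoverable load whose size must be read from the platform
  // dependent register (X == 2, Y == 0xF).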
# if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{}; // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

# elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{}; // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

# elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
# if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
# endif
  // If the faulting instruction is not an EBREAK (or C.EBREAK), the trap is
  // not ours.
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // Advance pc past the EBREAK and read the following ADDI instruction.
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // Reconstruct the 32-bit instruction word.
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // The access code is carried in the ADDI immediate, i.e. the top 12 bits of
  // the instruction word.
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not our case.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;

# else
# error Unsupported architecture
# endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

# if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
# elif SANITIZER_RISCV64
  // pc points at the trapping EBREAK; advance it past the instruction, which
  // is 2 bytes for a compressed C.EBREAK and 4 bytes otherwise.
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
# if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
# endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
# else
# error Unsupported architecture
# endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}
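
// A worked example of the fast path above (a sketch, assuming the default
// 16-byte shadow granularity and 4 KiB pages): tagging a 1 MiB region writes
// 1 MiB >> 4 == 64 KiB of shadow. When the tag is 0 and the page-aligned
// middle of that shadow range exceeds clear_shadow_mmap_threshold, only the
// unaligned head and tail are memset; the middle pages are dropped via
// ReleaseMemoryPagesToOSAndZeroFill() and refault as zero pages on Linux.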

void HwasanInstallAtForkHandler() {
  auto before = []() {
    HwasanAllocatorLock();
    StackDepotLockAll();
  };
  auto after = []() {
    StackDepotUnlockAll();
    HwasanAllocatorUnlock();
  };
  pthread_atfork(before, after, after);
}
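
// The handlers above keep the allocator and the stack depot in a consistent
// state across fork(): both locks are taken before the fork and released in
// reverse order in the parent and in the child, so a child spawned while
// another thread held either lock does not deadlock on its inherited copy.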

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD