//===-- xray_interface.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of the API functions.
//
//===----------------------------------------------------------------------===//

#include "xray_interface_internal.h"

#include <cinttypes>
#include <cstdio>
#include <errno.h>
#include <limits>
#include <string.h>
#include <sys/mman.h>

#if SANITIZER_FUCHSIA
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#endif

#include "sanitizer_common/sanitizer_addrhashmap.h"
#include "sanitizer_common/sanitizer_common.h"

#include "xray_defs.h"
#include "xray_flags.h"

extern __sanitizer::SpinMutex XRayInstrMapMutex;
extern __sanitizer::atomic_uint8_t XRayInitialized;
extern __xray::XRaySledMap XRayInstrMap;

namespace __xray {

#if defined(__x86_64__)
static const int16_t cSledLength = 12;
#elif defined(__aarch64__)
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
#elif defined(__hexagon__)
static const int16_t cSledLength = 20;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */

// This is the function to call when we encounter the entry or exit sleds.
atomic_uintptr_t XRayPatchedFunction{0};

// This is the function to call from the arg1-enabled sleds/trampolines.
atomic_uintptr_t XRayArgLogger{0};

// This is the function to call when we encounter a custom event log call.
atomic_uintptr_t XRayPatchedCustomEvent{0};

// This is the function to call when we encounter a typed event log call.
atomic_uintptr_t XRayPatchedTypedEvent{0};

// This is the global status to determine whether we are currently
// patching/unpatching.
atomic_uint8_t XRayPatching{0};
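// Illustrative sketch (not part of the runtime): the patched sleds jump into
// per-architecture trampolines that ultimately dispatch through these atomic
// pointers, conceptually equivalent to the following, so an unset handler
// simply turns the sled into a no-op:
//
//   auto Handler = reinterpret_cast<void (*)(int32_t, XRayEntryType)>(
//       atomic_load(&XRayPatchedFunction, memory_order_acquire));
//   if (Handler != nullptr)
//     Handler(FuncId, XRayEntryType::ENTRY);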

struct TypeDescription {
  uint32_t type_id;
  std::size_t description_string_length;
};

using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
// An address map from immutable descriptors to type ids.
TypeDescriptorMapType TypeDescriptorAddressMap{};

atomic_uint32_t TypeEventDescriptorCounter{0};
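// Illustrative sketch (not part of the runtime): __xray_register_event_type
// below uses this map to hand out one stable id per descriptor address;
// 'DescriptorAddr' is a placeholder for the descriptor string's address:
//
//   TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, DescriptorAddr);
//   if (h.created()) // first time this descriptor address is seen
//     h->type_id = atomic_fetch_add(&TypeEventDescriptorCounter, 1,
//                                   memory_order_acq_rel);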

// MProtectHelper is an RAII wrapper around mprotect(...) that undoes any
// successful protection change it made. It is used to make a page writable
// and executable; on destruction, if the change succeeded, it restores the
// page to read-and-execute only.
//
// This is used only for runtime patching of the XRay instrumentation points,
// and assumes that the executable pages are originally mapped
// read-and-execute only.
class MProtectHelper {
  void *PageAlignedAddr;
  std::size_t MProtectLen;
  bool MustCleanup;

public:
  explicit MProtectHelper(void *PageAlignedAddr,
                          std::size_t MProtectLen,
                          std::size_t PageSize) XRAY_NEVER_INSTRUMENT
      : PageAlignedAddr(PageAlignedAddr),
        MProtectLen(MProtectLen),
        MustCleanup(false) {
#if SANITIZER_FUCHSIA
    MProtectLen = RoundUpTo(MProtectLen, PageSize);
#endif
  }

  int MakeWriteable() XRAY_NEVER_INSTRUMENT {
#if SANITIZER_FUCHSIA
    auto R = __sanitizer_change_code_protection(
        reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, true);
    if (R != ZX_OK) {
      Report("XRay: cannot change code protection: %s\n",
             _zx_status_get_string(R));
      return -1;
    }
    MustCleanup = true;
    return 0;
#else
    auto R = mprotect(PageAlignedAddr, MProtectLen,
                      PROT_READ | PROT_WRITE | PROT_EXEC);
    if (R != -1)
      MustCleanup = true;
    return R;
#endif
  }

  ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
    if (MustCleanup) {
#if SANITIZER_FUCHSIA
      auto R = __sanitizer_change_code_protection(
          reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, false);
      if (R != ZX_OK) {
        Report("XRay: cannot change code protection: %s\n",
               _zx_status_get_string(R));
      }
#else
      mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
#endif
    }
  }
};
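// Illustrative sketch of the intended RAII usage (this mirrors controlPatching
// and mprotectAndPatchFunction below):
//
//   MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
//   if (Protector.MakeWriteable() == -1)
//     return XRayPatchingStatus::FAILED; // pages left untouched
//   // ... patch sleds while the pages are writable ...
//   // the destructor restores read-and-execute protection if it succeeded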

namespace {

bool patchSled(const XRaySledEntry &Sled, bool Enable,
               int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  bool Success = false;
  switch (Sled.Kind) {
  case XRayEntryType::ENTRY:
    Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
    break;
  case XRayEntryType::EXIT:
    Success = patchFunctionExit(Enable, FuncId, Sled);
    break;
  case XRayEntryType::TAIL:
    Success = patchFunctionTailExit(Enable, FuncId, Sled);
    break;
  case XRayEntryType::LOG_ARGS_ENTRY:
    Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
    break;
  case XRayEntryType::CUSTOM_EVENT:
    Success = patchCustomEvent(Enable, FuncId, Sled);
    break;
  case XRayEntryType::TYPED_EVENT:
    Success = patchTypedEvent(Enable, FuncId, Sled);
    break;
  default:
    Report("Unsupported sled kind '%d' @%" PRIu64 "\n", int(Sled.Kind),
           Sled.Address);
    return false;
  }
  return Success;
}

const XRayFunctionSledIndex
findFunctionSleds(int32_t FuncId,
                  const XRaySledMap &InstrMap) XRAY_NEVER_INSTRUMENT {
  int32_t CurFn = 0;
  uint64_t LastFnAddr = 0;
  XRayFunctionSledIndex Index = {nullptr, nullptr};

  for (std::size_t I = 0; I < InstrMap.Entries && CurFn <= FuncId; I++) {
    const auto &Sled = InstrMap.Sleds[I];
    const auto Function = Sled.function();
    if (Function != LastFnAddr) {
      CurFn++;
      LastFnAddr = Function;
    }

    if (CurFn == FuncId) {
      if (Index.Begin == nullptr)
        Index.Begin = &Sled;
      Index.End = &Sled;
    }
  }

  Index.End += 1;

  return Index;
}
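// Note: the returned index is a half-open range; callers walk it as in this
// short sketch (mirroring patchFunction below):
//
//   auto Range = findFunctionSleds(FuncId, InstrMap);
//   for (auto *Sled = Range.Begin; Sled != Range.End; ++Sled)
//     patchSled(*Sled, Enable, FuncId);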

XRayPatchingStatus patchFunction(int32_t FuncId,
                                 bool Enable) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized,
                   memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(
          &XRayPatching, &NotPatching, true, memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  // Next, we look for the function index.
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }

  // If we don't have an index, we can't patch individual functions.
  if (InstrMap.Functions == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  // FuncId must be a positive number, no greater than the number of functions
  // instrumented.
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
    Report("Invalid function id provided: %d\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  // Now we patch the sleds for this specific function.
  auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
                                       : findFunctionSleds(FuncId, InstrMap);
  auto *f = SledRange.Begin;
  auto *e = SledRange.End;
  bool SucceedOnce = false;
  while (f != e)
    SucceedOnce |= patchSled(*f++, Enable, FuncId);

  atomic_store(&XRayPatching, false,
               memory_order_release);

  if (!SucceedOnce) {
    Report("Failed patching any sled for function '%d'.\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  return XRayPatchingStatus::SUCCESS;
}

// controlPatching implements the common internals of the patching/unpatching
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized,
                   memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(
          &XRayPatching, &NotPatching, true, memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  uint8_t PatchingSuccess = false;
  auto XRayPatchingStatusResetter =
      at_scope_exit([&PatchingSuccess] {
        if (!PatchingSuccess)
          atomic_store(&XRayPatching, false,
                       memory_order_release);
      });

  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }
  if (InstrMap.Entries == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  uint32_t FuncId = 1;
  uint64_t CurFun = 0;

  // First we want to find the bounds for which we have instrumentation points,
  // and try to get as few calls to mprotect(...) as possible. We're assuming
  // that all the sleds for the instrumentation map are contiguous as a single
  // set of pages. When we do support dynamic shared object instrumentation,
  // we'll need to do this for each set of page load offsets per DSO loaded.
  // For now we're assuming we can mprotect the whole section of text between
  // the minimum sled address and the maximum sled address (+ the largest sled
  // size).
  auto *MinSled = &InstrMap.Sleds[0];
  auto *MaxSled = &InstrMap.Sleds[InstrMap.Entries - 1];
  for (std::size_t I = 0; I < InstrMap.Entries; I++) {
    const auto &Sled = InstrMap.Sleds[I];
    if (Sled.address() < MinSled->address())
      MinSled = &Sled;
    if (Sled.address() > MaxSled->address())
      MaxSled = &Sled;
  }

  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("System page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
      cSledLength;
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }

  for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
    auto &Sled = InstrMap.Sleds[I];
    auto F = Sled.function();
    if (CurFun == 0)
      CurFun = F;
    if (F != CurFun) {
      ++FuncId;
      CurFun = F;
    }
    patchSled(Sled, Enable, FuncId);
  }
  atomic_store(&XRayPatching, false,
               memory_order_release);
  PatchingSuccess = true;
  return XRayPatchingStatus::SUCCESS;
}

XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
                                            bool Enable) XRAY_NEVER_INSTRUMENT {
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }

  // FuncId must be a positive number, no greater than the number of functions
  // instrumented.
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
    Report("Invalid function id provided: %d\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("Provided page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  // Here we compute the minimum sled and maximum sled associated with a
  // particular function ID.
  auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
                                       : findFunctionSleds(FuncId, InstrMap);
  auto *f = SledRange.Begin;
  auto *e = SledRange.End;
  auto *MinSled = f;
  auto *MaxSled = (SledRange.End - 1);
  while (f != e) {
    if (f->address() < MinSled->address())
      MinSled = f;
    if (f->address() > MaxSled->address())
      MaxSled = f;
    ++f;
  }

  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
      cSledLength;
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }
  return patchFunction(FuncId, Enable);
}

} // namespace

} // namespace __xray

using namespace __xray;

// The following functions are declared `extern "C" {...}` in the header, hence
// they're defined in the global namespace.
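//
// Illustrative sketch (not part of the runtime) of how a client typically
// drives this API: install a handler first, then patch the sleds; the handler
// body here is a placeholder.
//
//   __xray_set_handler(+[](int32_t FuncId, XRayEntryType Type) {
//     // record FuncId/Type somewhere cheap and thread-safe
//   });
//   if (__xray_patch() != XRayPatchingStatus::SUCCESS) {
//     // patching failed or is already in progress
//   }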

int __xray_set_handler(void (*entry)(int32_t,
                                     XRayEntryType)) XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized,
                  memory_order_acquire)) {

    atomic_store(&__xray::XRayPatchedFunction,
                 reinterpret_cast<uintptr_t>(entry),
                 memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_set_customevent_handler(void (*entry)(void *, size_t))
    XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized,
                  memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedCustomEvent,
                 reinterpret_cast<uintptr_t>(entry),
                 memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_set_typedevent_handler(void (*entry)(
    uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized,
                  memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedTypedEvent,
                 reinterpret_cast<uintptr_t>(entry),
                 memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_handler(nullptr);
}

int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_customevent_handler(nullptr);
}

int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_typedevent_handler(nullptr);
}

uint16_t __xray_register_event_type(
    const char *const event_type) XRAY_NEVER_INSTRUMENT {
  TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
  if (h.created()) {
    h->type_id = atomic_fetch_add(
        &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
    h->description_string_length = strnlen(event_type, 1024);
  }
  return h->type_id;
}
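// Illustrative sketch (not part of the runtime): a client registers a type id
// once per descriptor and reuses it for typed events; 'MyEventType' is a
// placeholder name, and the event-emitting call is elided since only the
// handler side lives in this file.
//
//   static const uint16_t MyEventType = __xray_register_event_type("my_event");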

XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(true);
}

XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(false);
}

XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, true);
}

XRayPatchingStatus
__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, false);
}

int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
  if (!atomic_load(&XRayInitialized,
                   memory_order_acquire))
    return 0;

  // A relaxed write might not be visible even if the current thread gets
  // scheduled on a different CPU/NUMA node. We need to wait for everyone to
  // have this handler installed for consistency of collected data across CPUs.
  atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
               memory_order_release);
  return 1;
}

int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }

uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }

  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions)
    return 0;
  const XRaySledEntry *Sled = InstrMap.SledsIndex
                                  ? InstrMap.SledsIndex[FuncId - 1].Begin
                                  : findFunctionSleds(FuncId, InstrMap).Begin;
  return Sled->function()
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which is always +8 based on the global entry.
// Always return the global entry.
#ifdef __PPC__
         & ~0xf
#endif
      ;
}

size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  return XRayInstrMap.Functions;
}
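
// Illustrative sketch (not part of the runtime): function ids are 1-based, so
// a client can enumerate the instrumented functions like this:
//
//   for (size_t FuncId = 1; FuncId <= __xray_max_function_id(); ++FuncId)
//     if (uintptr_t Addr = __xray_function_address(FuncId))
//       ; // map FuncId -> Addr, e.g. for later symbolization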