//===-- xray_powerpc64.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of powerpc64 and powerpc64le routines.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "xray_defs.h"
#include "xray_interface_internal.h"
#include "xray_utils.h"
#include <atomic>
#include <cassert>
#include <cstring>

#ifndef __LITTLE_ENDIAN__
#error powerpc64 big endian is not supported for now.
#endif

namespace {

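// Number of 4-byte instruction slots the disabled entry sled branches forward
// (7 * 4 = 28 bytes); patchFunctionExit also uses it to locate the preserved
// blr/b terminator within the exit sled.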
constexpr unsigned long long JumpOverInstNum = 7;

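// Make freshly patched code visible to instruction fetch: flush the touched
// data cache lines (dcbf), then invalidate the matching instruction cache
// lines (icbi), with sync/isync barriers in between. The 32-byte LineSize is
// smaller than typical POWER cache lines, which is safe, just redundant.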
void clearCache(void *Addr, size_t Len) {
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t)Addr) & Mask;
  const intptr_t EndLine = ((intptr_t)Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
}

} // namespace

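// Note: __clear_cache is only declared here; this file flushes patched bytes
// through the local clearCache() above.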
extern "C" void __clear_cache(void *start, void *end);

namespace __xray {

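// Patching scheme (a sketch; see the sled emission in the PowerPC backend for
// the exact layout): each sled starts with eight bytes that are rewritten here.
// When enabling, they become "lis 0, hi16(FuncId); ori 0, 0, lo16(FuncId)",
// loading the function id into r0 for the statically emitted call to
// __xray_FunctionEntry / __xray_FunctionExit that follows. When disabling, the
// entry sled starts with a branch over the remaining sled instructions, and the
// exit sled gets back its original blr/b, which is kept JumpOverInstNum
// instructions further down.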
bool patchFunctionEntry(const bool Enable, uint32_t FuncId,
                        const XRaySledEntry &Sled,
                        const XRayTrampolines &Trampolines,
                        bool LogArgs) XRAY_NEVER_INSTRUMENT {
  // TODO: Trampoline addresses are currently inserted at compile time, and only
  //       __xray_FunctionEntry and __xray_FunctionExit are used.
  //       To support DSO instrumentation, trampoline addresses have to be
  //       written during patching (see the X86_64 implementation, for example).
  const uint64_t Address = Sled.address();
  if (Enable) {
    // lis 0, FuncId[16..31]
    // ori 0, 0, FuncId[0..15]
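    // On little-endian targets the low word of this 64-bit store lands at
    // Address (the lis) and the high word at Address + 4 (the ori); the
    // packing relies on that, hence the big-endian #error above.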
    *reinterpret_cast<uint64_t *>(Address) =
        (0x3c000000ull + (FuncId >> 16)) +
        ((0x60000000ull + (FuncId & 0xffff)) << 32);
  } else {
    // b +JumpOverInstNum instructions.
    *reinterpret_cast<uint32_t *>(Address) =
        0x48000000ull + (JumpOverInstNum << 2);
  }
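  // Flush the (at most) eight rewritten bytes so instruction fetch observes
  // the new code.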
  clearCache(reinterpret_cast<void *>(Address), 8);
  return true;
}

bool patchFunctionExit(
    const bool Enable, uint32_t FuncId, const XRaySledEntry &Sled,
    const XRayTrampolines &Trampolines) XRAY_NEVER_INSTRUMENT {
  // TODO: Trampoline addresses are currently inserted at compile time, and only
  //       __xray_FunctionEntry and __xray_FunctionExit are used.
  //       To support DSO instrumentation, trampoline addresses have to be
  //       written during patching (see the X86_64 implementation, for example).
  const uint64_t Address = Sled.address();
  if (Enable) {
    // lis 0, FuncId[16..31]
    // ori 0, 0, FuncId[0..15]
    *reinterpret_cast<uint64_t *>(Address) =
        (0x3c000000ull + (FuncId >> 16)) +
        ((0x60000000ull + (FuncId & 0xffff)) << 32);
  } else {
    // Restore the original blr/b, preserved JumpOverInstNum instructions past
    // the start of the sled.
    *reinterpret_cast<uint32_t *>(Address) =
        *(reinterpret_cast<uint32_t *>(Address) + JumpOverInstNum);
  }
  clearCache(reinterpret_cast<void *>(Address), 8);
  return true;
}

bool patchFunctionTailExit(
    const bool Enable, const uint32_t FuncId, const XRaySledEntry &Sled,
    const XRayTrampolines &Trampolines) XRAY_NEVER_INSTRUMENT {
  return patchFunctionExit(Enable, FuncId, Sled, Trampolines);
}

// FIXME: Maybe implement this better?
bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }

bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
                      const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  // FIXME: Implement in powerpc64?
  return false;
}

bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
                     const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  // FIXME: Implement in powerpc64?
  return false;
}

} // namespace __xray

extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
  // FIXME: This will have to be implemented in the trampoline assembly file.
}

extern "C" void __xray_FunctionTailExit() XRAY_NEVER_INSTRUMENT {
  // For PowerPC, calls to __xray_FunctionEntry and __xray_FunctionExit
  // are statically inserted into the sled. Tail exits are handled like normal
  // function exits. This trampoline is therefore not implemented.
  // This stub is placed here to avoid linking issues.
}