1 //===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between AddressSanitizer and ThreadSanitizer
10 // run-time libraries.
11 //===----------------------------------------------------------------------===//
12 #ifndef SANITIZER_STACKTRACE_H
13 #define SANITIZER_STACKTRACE_H
14
15 #include "sanitizer_common.h"
16 #include "sanitizer_internal_defs.h"
17 #include "sanitizer_platform.h"
18
19 namespace __sanitizer {
20
// Forward declaration; the buffer-owning variant is defined further below.
struct BufferedStackTrace;

// Upper bound on the number of frames recorded in a single stack trace.
static const u32 kStackTraceMax = 256;

// Whether the fast (frame-pointer-walking) unwinder is usable on this target.
#if SANITIZER_LINUX && defined(__mips__)
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
#else
# define SANITIZER_CAN_FAST_UNWIND 1
#endif

// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
#if SANITIZER_MAC
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif
41
42 struct StackTrace {
43 const uptr *trace;
44 u32 size;
45 u32 tag;
46
47 static const int TAG_UNKNOWN = 0;
48 static const int TAG_ALLOC = 1;
49 static const int TAG_DEALLOC = 2;
50 static const int TAG_CUSTOM = 100; // Tool specific tags start here.
51
StackTraceStackTrace52 StackTrace() : trace(nullptr), size(0), tag(0) {}
StackTraceStackTrace53 StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
StackTraceStackTrace54 StackTrace(const uptr *trace, u32 size, u32 tag)
55 : trace(trace), size(size), tag(tag) {}
56
57 // Prints a symbolized stacktrace, followed by an empty line.
58 void Print() const;
59
60 // Prints a symbolized stacktrace to the output string, followed by an empty
61 // line.
62 void PrintTo(InternalScopedString *output) const;
63
64 // Prints a symbolized stacktrace to the output buffer, followed by an empty
65 // line. Returns the number of symbols that should have been written to buffer
66 // (not including trailing '\0'). Thus, the string is truncated iff return
67 // value is not less than "out_buf_size".
68 uptr PrintTo(char *out_buf, uptr out_buf_size) const;
69
WillUseFastUnwindStackTrace70 static bool WillUseFastUnwind(bool request_fast_unwind) {
71 if (!SANITIZER_CAN_FAST_UNWIND)
72 return false;
73 if (!SANITIZER_CAN_SLOW_UNWIND)
74 return true;
75 return request_fast_unwind;
76 }
77
78 static uptr GetCurrentPc();
79 static inline uptr GetPreviousInstructionPc(uptr pc);
80 static uptr GetNextInstructionPc(uptr pc);
81 };
82
// Maps a return address to an address inside the call instruction that
// produced it, so symbolization points at the call site rather than the
// instruction after it. The exact offset is architecture-dependent.
// Performance-critical, must be in the header.
ALWAYS_INLINE
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#if defined(__arm__)
  // T32 (Thumb) branch instructions might be 16 or 32 bit long,
  // so we return (pc-2) in that case in order to be safe.
  // For A32 mode we return (pc-4) because all instructions are 32 bit long.
  // (pc - 3) & ~1 yields pc-4 for 4-byte-aligned pc and pc-2 otherwise,
  // while clearing the low bit that encodes the Thumb state.
  return (pc - 3) & (~1);
#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
  // PCs are always 4 byte aligned.
  return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
  // NOTE(review): the 8-byte step presumably accounts for the branch delay
  // slot on these architectures — confirm before relying on it.
  return pc - 8;
#elif SANITIZER_RISCV64
  // RV-64 has variable instruction length...
  // C extensions gives us 2-byte instructions
  // RV-64 has 4-byte instructions
  // + RISCV architecture allows instructions up to 8 bytes
  // It seems difficult to figure out the exact instruction length -
  // pc - 2 seems like a safe option for the purposes of stack tracing
  return pc - 2;
#else
  // Variable-length ISAs (e.g. x86): stepping back one byte is enough to
  // land inside the preceding instruction.
  return pc - 1;
#endif
}
108
109 // StackTrace that owns the buffer used to store the addresses.
110 struct BufferedStackTrace : public StackTrace {
111 uptr trace_buffer[kStackTraceMax];
112 uptr top_frame_bp; // Optional bp of a top frame.
113
BufferedStackTraceBufferedStackTrace114 BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
115
116 void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
117
118 // Get the stack trace with the given pc and bp.
119 // The pc will be in the position 0 of the resulting stack trace.
120 // The bp may refer to the current frame or to the caller's frame.
121 void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
122 u32 max_depth = kStackTraceMax) {
123 top_frame_bp = (max_depth > 0) ? bp : 0;
124 // Small max_depth optimization
125 if (max_depth <= 1) {
126 if (max_depth == 1)
127 trace_buffer[0] = pc;
128 size = max_depth;
129 return;
130 }
131 UnwindImpl(pc, bp, context, request_fast, max_depth);
132 }
133
134 void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
135 uptr stack_bottom, bool request_fast_unwind);
136
ResetBufferedStackTrace137 void Reset() {
138 *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
139 top_frame_bp = 0;
140 }
141
142 private:
143 // Every runtime defines its own implementation of this method
144 void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
145 u32 max_depth);
146
147 // UnwindFast/Slow have platform-specific implementations
148 void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
149 u32 max_depth);
150 void UnwindSlow(uptr pc, u32 max_depth);
151 void UnwindSlow(uptr pc, void *context, u32 max_depth);
152
153 void PopStackFrames(uptr count);
154 uptr LocatePcInTrace(uptr pc);
155
156 BufferedStackTrace(const BufferedStackTrace &) = delete;
157 void operator=(const BufferedStackTrace &) = delete;
158
159 friend class FastUnwindTest;
160 };
161
// Minimum stack frame size used by IsValidFrame below to require that a
// candidate frame pointer leaves room for at least one frame before the
// top of the stack.
// NOTE(review): 160/96 presumably match the s390x/s390 ABI stack frame
// sizes — confirm against the ABI documents.
#if defined(__s390x__)
static const uptr kFrameSize = 160;
#elif defined(__s390__)
static const uptr kFrameSize = 96;
#else
static const uptr kFrameSize = 2 * sizeof(uhwptr);
#endif

// Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
  return frame > stack_bottom && frame < stack_top - kFrameSize;
}
174
175 } // namespace __sanitizer
176
// Use this macro if you want to print stack trace with the caller
// of the current function in the top frame.
#define GET_CALLER_PC_BP \
  uptr bp = GET_CURRENT_FRAME();              \
  uptr pc = GET_CALLER_PC();

// Same as GET_CALLER_PC_BP, but also captures an approximate stack pointer
// by taking the address of a fresh local variable.
#define GET_CALLER_PC_BP_SP \
  GET_CALLER_PC_BP;                           \
  uptr local_stack;                           \
  uptr sp = (uptr)&local_stack

// Use this macro if you want to print stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP \
  uptr bp = GET_CURRENT_FRAME();              \
  uptr pc = StackTrace::GetCurrentPc()

// As GET_CURRENT_PC_BP, plus an approximate stack pointer (see above).
#define GET_CURRENT_PC_BP_SP \
  GET_CURRENT_PC_BP;                          \
  uptr local_stack;                           \
  uptr sp = (uptr)&local_stack

// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
// Optimized x86 version is faster than GetCurrentPc because
// it does not involve a function call, instead it reads RIP register.
// Reads of RIP by an instruction return RIP pointing to the next
// instruction, which is exactly what we want here, thus 0 offset.
// It needs to be a macro because otherwise we will get the name
// of this function on the top of most stacks. Attribute artificial
// does not do what it claims to do, unfortunately. And attribute
// __nodebug__ is clang-only. If we would have an attribute that
// would remove this function from debug info, we could simply make
// StackTrace::GetCurrentPc() faster.
#if defined(__x86_64__)
# define GET_CURRENT_PC() \
  (__extension__({ \
    uptr pc; \
    asm("lea 0(%%rip), %0" : "=r"(pc)); \
    pc; \
  }))
#else
# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
#endif
220
221 #endif // SANITIZER_STACKTRACE_H
222