1*ef5ccd6cSJohn Marino /* Linux-dependent part of branch trace support for GDB, and GDBserver.
2*ef5ccd6cSJohn Marino
3*ef5ccd6cSJohn Marino Copyright (C) 2013 Free Software Foundation, Inc.
4*ef5ccd6cSJohn Marino
5*ef5ccd6cSJohn Marino Contributed by Intel Corp. <markus.t.metzger@intel.com>
6*ef5ccd6cSJohn Marino
7*ef5ccd6cSJohn Marino This file is part of GDB.
8*ef5ccd6cSJohn Marino
9*ef5ccd6cSJohn Marino This program is free software; you can redistribute it and/or modify
10*ef5ccd6cSJohn Marino it under the terms of the GNU General Public License as published by
11*ef5ccd6cSJohn Marino the Free Software Foundation; either version 3 of the License, or
12*ef5ccd6cSJohn Marino (at your option) any later version.
13*ef5ccd6cSJohn Marino
14*ef5ccd6cSJohn Marino This program is distributed in the hope that it will be useful,
15*ef5ccd6cSJohn Marino but WITHOUT ANY WARRANTY; without even the implied warranty of
16*ef5ccd6cSJohn Marino MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17*ef5ccd6cSJohn Marino GNU General Public License for more details.
18*ef5ccd6cSJohn Marino
19*ef5ccd6cSJohn Marino You should have received a copy of the GNU General Public License
20*ef5ccd6cSJohn Marino along with this program. If not, see <http://www.gnu.org/licenses/>. */
21*ef5ccd6cSJohn Marino
22*ef5ccd6cSJohn Marino #ifdef GDBSERVER
23*ef5ccd6cSJohn Marino #include "server.h"
24*ef5ccd6cSJohn Marino #else
25*ef5ccd6cSJohn Marino #include "defs.h"
26*ef5ccd6cSJohn Marino #endif
27*ef5ccd6cSJohn Marino
28*ef5ccd6cSJohn Marino #include "linux-btrace.h"
29*ef5ccd6cSJohn Marino #include "common-utils.h"
30*ef5ccd6cSJohn Marino #include "gdb_assert.h"
31*ef5ccd6cSJohn Marino #include "regcache.h"
32*ef5ccd6cSJohn Marino #include "gdbthread.h"
33*ef5ccd6cSJohn Marino
34*ef5ccd6cSJohn Marino #if HAVE_LINUX_PERF_EVENT_H
35*ef5ccd6cSJohn Marino
36*ef5ccd6cSJohn Marino #include <errno.h>
37*ef5ccd6cSJohn Marino #include <string.h>
38*ef5ccd6cSJohn Marino #include <stdint.h>
39*ef5ccd6cSJohn Marino #include <unistd.h>
40*ef5ccd6cSJohn Marino #include <sys/syscall.h>
41*ef5ccd6cSJohn Marino #include <sys/mman.h>
42*ef5ccd6cSJohn Marino #include <sys/user.h>
43*ef5ccd6cSJohn Marino #include <sys/ptrace.h>
44*ef5ccd6cSJohn Marino #include <sys/types.h>
45*ef5ccd6cSJohn Marino #include <sys/wait.h>
46*ef5ccd6cSJohn Marino #include <signal.h>
47*ef5ccd6cSJohn Marino
/* A branch trace record in perf_event.

   NOTE(review): this layout is assumed to match the sample payload the
   kernel writes for PERF_SAMPLE_IP | PERF_SAMPLE_ADDR events (see the
   attr.sample_type setup in linux_enable_btrace) -- confirm against the
   running kernel's perf_event ABI.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
57*ef5ccd6cSJohn Marino
/* A perf_event branch trace sample as it appears in the mmap'ed ring
   buffer: a generic perf_event header immediately followed by the
   branch payload.  perf_event_read_bts casts raw buffer bytes to this
   struct, so it must remain layout-compatible with the kernel's
   records.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
67*ef5ccd6cSJohn Marino
68*ef5ccd6cSJohn Marino /* Get the perf_event header. */
69*ef5ccd6cSJohn Marino
70*ef5ccd6cSJohn Marino static inline volatile struct perf_event_mmap_page *
perf_event_header(struct btrace_target_info * tinfo)71*ef5ccd6cSJohn Marino perf_event_header (struct btrace_target_info* tinfo)
72*ef5ccd6cSJohn Marino {
73*ef5ccd6cSJohn Marino return tinfo->buffer;
74*ef5ccd6cSJohn Marino }
75*ef5ccd6cSJohn Marino
76*ef5ccd6cSJohn Marino /* Get the size of the perf_event mmap buffer. */
77*ef5ccd6cSJohn Marino
78*ef5ccd6cSJohn Marino static inline size_t
perf_event_mmap_size(const struct btrace_target_info * tinfo)79*ef5ccd6cSJohn Marino perf_event_mmap_size (const struct btrace_target_info *tinfo)
80*ef5ccd6cSJohn Marino {
81*ef5ccd6cSJohn Marino /* The branch trace buffer is preceded by a configuration page. */
82*ef5ccd6cSJohn Marino return (tinfo->size + 1) * PAGE_SIZE;
83*ef5ccd6cSJohn Marino }
84*ef5ccd6cSJohn Marino
85*ef5ccd6cSJohn Marino /* Get the size of the perf_event buffer. */
86*ef5ccd6cSJohn Marino
87*ef5ccd6cSJohn Marino static inline size_t
perf_event_buffer_size(struct btrace_target_info * tinfo)88*ef5ccd6cSJohn Marino perf_event_buffer_size (struct btrace_target_info* tinfo)
89*ef5ccd6cSJohn Marino {
90*ef5ccd6cSJohn Marino return tinfo->size * PAGE_SIZE;
91*ef5ccd6cSJohn Marino }
92*ef5ccd6cSJohn Marino
93*ef5ccd6cSJohn Marino /* Get the start address of the perf_event buffer. */
94*ef5ccd6cSJohn Marino
95*ef5ccd6cSJohn Marino static inline const uint8_t *
perf_event_buffer_begin(struct btrace_target_info * tinfo)96*ef5ccd6cSJohn Marino perf_event_buffer_begin (struct btrace_target_info* tinfo)
97*ef5ccd6cSJohn Marino {
98*ef5ccd6cSJohn Marino return ((const uint8_t *) tinfo->buffer) + PAGE_SIZE;
99*ef5ccd6cSJohn Marino }
100*ef5ccd6cSJohn Marino
/* Return one past the last byte of the circular trace buffer.  */

static inline const uint8_t *
perf_event_buffer_end (struct btrace_target_info* tinfo)
{
  const uint8_t *first = perf_event_buffer_begin (tinfo);

  return first + perf_event_buffer_size (tinfo);
}
108*ef5ccd6cSJohn Marino
109*ef5ccd6cSJohn Marino /* Check whether an address is in the kernel. */
110*ef5ccd6cSJohn Marino
111*ef5ccd6cSJohn Marino static inline int
perf_event_is_kernel_addr(const struct btrace_target_info * tinfo,uint64_t addr)112*ef5ccd6cSJohn Marino perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
113*ef5ccd6cSJohn Marino uint64_t addr)
114*ef5ccd6cSJohn Marino {
115*ef5ccd6cSJohn Marino uint64_t mask;
116*ef5ccd6cSJohn Marino
117*ef5ccd6cSJohn Marino /* If we don't know the size of a pointer, we can't check. Let's assume it's
118*ef5ccd6cSJohn Marino not a kernel address in this case. */
119*ef5ccd6cSJohn Marino if (tinfo->ptr_bits == 0)
120*ef5ccd6cSJohn Marino return 0;
121*ef5ccd6cSJohn Marino
122*ef5ccd6cSJohn Marino /* A bit mask for the most significant bit in an address. */
123*ef5ccd6cSJohn Marino mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
124*ef5ccd6cSJohn Marino
125*ef5ccd6cSJohn Marino /* Check whether the most significant bit in the address is set. */
126*ef5ccd6cSJohn Marino return (addr & mask) != 0;
127*ef5ccd6cSJohn Marino }
128*ef5ccd6cSJohn Marino
129*ef5ccd6cSJohn Marino /* Check whether a perf event record should be skipped. */
130*ef5ccd6cSJohn Marino
131*ef5ccd6cSJohn Marino static inline int
perf_event_skip_record(const struct btrace_target_info * tinfo,const struct perf_event_bts * bts)132*ef5ccd6cSJohn Marino perf_event_skip_record (const struct btrace_target_info *tinfo,
133*ef5ccd6cSJohn Marino const struct perf_event_bts *bts)
134*ef5ccd6cSJohn Marino {
135*ef5ccd6cSJohn Marino /* The hardware may report branches from kernel into user space. Branches
136*ef5ccd6cSJohn Marino from user into kernel space will be suppressed. We filter the former to
137*ef5ccd6cSJohn Marino provide a consistent branch trace excluding kernel. */
138*ef5ccd6cSJohn Marino return perf_event_is_kernel_addr (tinfo, bts->from);
139*ef5ccd6cSJohn Marino }
140*ef5ccd6cSJohn Marino
141*ef5ccd6cSJohn Marino /* Perform a few consistency checks on a perf event sample record. This is
142*ef5ccd6cSJohn Marino meant to catch cases when we get out of sync with the perf event stream. */
143*ef5ccd6cSJohn Marino
144*ef5ccd6cSJohn Marino static inline int
perf_event_sample_ok(const struct perf_event_sample * sample)145*ef5ccd6cSJohn Marino perf_event_sample_ok (const struct perf_event_sample *sample)
146*ef5ccd6cSJohn Marino {
147*ef5ccd6cSJohn Marino if (sample->header.type != PERF_RECORD_SAMPLE)
148*ef5ccd6cSJohn Marino return 0;
149*ef5ccd6cSJohn Marino
150*ef5ccd6cSJohn Marino if (sample->header.size != sizeof (*sample))
151*ef5ccd6cSJohn Marino return 0;
152*ef5ccd6cSJohn Marino
153*ef5ccd6cSJohn Marino return 1;
154*ef5ccd6cSJohn Marino }
155*ef5ccd6cSJohn Marino
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.

   Returns a newly allocated VEC of blocks in reverse chronological order
   (most recent block first); the caller owns the vector.  Returns NULL if
   no valid sample was found.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
		     const uint8_t *end, const uint8_t *start)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0, size = (end - begin);
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
#ifdef GDBSERVER
  regcache = get_thread_regcache (find_thread_ptid (tinfo->ptid), 1);
#else
  regcache = get_thread_regcache (tinfo->ptid);
#endif
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  Pre-charging READ
     by one less than a full sample makes the loop below stop before it
     would step backwards into that partial record.  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
	psample = (const struct perf_event_sample *) start;
      else
	{
	  int missing;

	  /* We're to the left of the ring buffer, we will wrap around and
	     reappear at the very right of the ring buffer.  */

	  missing = (begin - start);
	  start = (end - missing);

	  /* If the entire sample is missing, we're done.  */
	  if (missing == sizeof (sample))
	    psample = (const struct perf_event_sample *) start;
	  else
	    {
	      uint8_t *stack;

	      /* The sample wrapped around.  The lower part is at the end and
		 the upper part is at the beginning of the buffer.  */
	      stack = (uint8_t *) &sample;

	      /* Copy the two parts so we have a contiguous sample.  */
	      memcpy (stack, start, missing);
	      memcpy (stack + missing, begin, sizeof (sample) - missing);

	      psample = &sample;
	    }
	}

      /* Stop at the first record that does not look like a sample; we
	 lost synchronization with the perf event stream.  */
      if (!perf_event_sample_ok (psample))
	{
	  warning (_("Branch trace may be incomplete."));
	  break;
	}

      /* Drop kernel-originating branches; see perf_event_skip_record.  */
      if (perf_event_skip_record (tinfo, &psample->bts))
	continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  return btrace;
}
253*ef5ccd6cSJohn Marino
/* Check whether the kernel supports branch tracing.

   Forks a child that makes itself traceable and stops itself; the parent
   then attempts to open a branch-tracing perf event on that child.
   Branch tracing is deemed supported iff perf_event_open succeeds.
   Returns non-zero if the kernel supports branch tracing.  */

static int
kernel_supports_btrace (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test branch tracing: cannot fork: %s."), strerror (errno));
      return 0;

    case 0:
      /* Child: become traceable, then stop ourselves with SIGTRAP so the
	 parent can probe us while we are stopped.  */
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test branch tracing: cannot PTRACE_TRACEME: %s."),
		   strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test branch tracing: cannot raise SIGTRAP: %s."),
		   strerror (errno));
	  _exit (1);
	}

      /* Normally not reached: the parent SIGKILLs us while we are stopped
	 in the raise above.  */
      _exit (1);

    default:
      /* Parent: wait for the child to stop on its SIGTRAP.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test branch tracing: bad pid %ld, error: %s."),
		   (long) pid, strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test branch tracing: expected stop. status: %d."),
		   status);
	  return 0;
	}

      memset (&attr, 0, sizeof (attr));

      /* Configure a branch-tracing event: sample on every branch
	 instruction, recording the instruction pointer and the branch
	 target address; exclude kernel, hypervisor, and idle.  This
	 mirrors the configuration used in linux_enable_btrace.  */
      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      /* We only need to know whether the event can be opened; close it
	 again right away.  */
      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
	close (file);

      /* Clean up the stopped child.  */
      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      /* Reap the child to avoid leaving a zombie.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test branch tracing: bad pid %ld, error: %s."),
		   (long) pid, strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test branch tracing: expected killed. status: %d."),
		     status);
	}

      return (file >= 0);
    }
}
336*ef5ccd6cSJohn Marino
/* Check whether an Intel cpu supports branch tracing.

   Executes CPUID leaf 1 and decodes the family/model fields from EAX.
   Known-erratic models are rejected; everything else is assumed to
   work.  Returns non-zero if branch tracing is believed to be
   reliable on this cpu.  */

static int
intel_supports_btrace (void)
{
#if defined __i386__ || defined __x86_64__
  unsigned int cpuid, model, family;

  /* CPUID leaf 1: processor signature in EAX.  EBX/ECX/EDX are
     clobbered by the instruction.  */
  __asm__ __volatile__ ("movl $1, %%eax;"
			"cpuid;"
			: "=a" (cpuid)
			:: "%ebx", "%ecx", "%edx");

  family = (cpuid >> 8) & 0xf;
  model = (cpuid >> 4) & 0xf;

  switch (family)
    {
    case 0x6:
      /* For family 6, the extended model field (EAX bits 19:16) is
	 prepended to the model.  */
      model += (cpuid >> 12) & 0xf0;

      switch (model)
	{
	case 0x1a: /* Nehalem */
	case 0x1f:
	case 0x1e:
	case 0x2e:
	case 0x25: /* Westmere */
	case 0x2c:
	case 0x2f:
	case 0x2a: /* Sandy Bridge */
	case 0x2d:
	case 0x3a: /* Ivy Bridge */

	  /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
	     "from" information after an EIST transition, T-states, C1E, or
	     Adaptive Thermal Throttling.  */
	  return 0;
	}
    }

  /* Not a known-erratic model; assume branch tracing works.  */
  return 1;

#else /* !defined __i386__ && !defined __x86_64__ */

  return 0;

#endif /* !defined __i386__ && !defined __x86_64__ */
}
386*ef5ccd6cSJohn Marino
/* Check whether the cpu supports branch tracing.

   Reads the vendor identification string via CPUID leaf 0 and defers
   to a vendor-specific check where one exists.  Returns non-zero if
   branch tracing is believed to work on this cpu.  */

static int
cpu_supports_btrace (void)
{
#if defined __i386__ || defined __x86_64__
  char vendor[13];

  /* CPUID leaf 0 returns the 12-byte vendor string split across
     EBX, EDX, ECX -- in that order.  */
  __asm__ __volatile__ ("xorl %%ebx, %%ebx;"
			"xorl %%ecx, %%ecx;"
			"xorl %%edx, %%edx;"
			"movl $0, %%eax;"
			"cpuid;"
			"movl %%ebx, %0;"
			"movl %%edx, %1;"
			"movl %%ecx, %2;"
			: "=m" (vendor[0]),
			  "=m" (vendor[4]),
			  "=m" (vendor[8])
			:
			: "%eax", "%ebx", "%ecx", "%edx");
  vendor[12] = '\0';

  if (strcmp (vendor, "GenuineIntel") == 0)
    return intel_supports_btrace ();

  /* Don't know about others.  Let's assume they do.  */
  return 1;

#else /* !defined __i386__ && !defined __x86_64__ */

  return 0;

#endif /* !defined __i386__ && !defined __x86_64__ */
}
422*ef5ccd6cSJohn Marino
/* See linux-btrace.h.  */

int
linux_supports_btrace (void)
{
  /* Cached probe result: 0 = not yet checked, 1 = supported,
     -1 = unsupported.  */
  static int cached;

  if (cached == 0)
    cached = (kernel_supports_btrace () && cpu_supports_btrace ()) ? 1 : -1;

  return cached > 0;
}
442*ef5ccd6cSJohn Marino
/* See linux-btrace.h.

   Enables branch tracing for the thread PTID by opening a perf event
   and mmap'ing its sample buffer.  Returns a newly allocated target
   info on success (owned by the caller, released via
   linux_disable_btrace) or NULL on failure.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;
  int pid;

  tinfo = xzalloc (sizeof (*tinfo));
  tinfo->ptid = ptid;

  /* Sample on every branch instruction, excluding kernel, hypervisor,
     and idle.  This matches the probe in kernel_supports_btrace.  */
  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  tinfo->attr.sample_period = 1;

  /* We sample from and to address.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  /* Pointer width unknown at this point; kernel-address filtering in
     perf_event_is_kernel_addr is disabled until this is set.  */
  tinfo->ptr_bits = 0;

  /* Prefer the lwp; fall back to the pid for the main thread.  */
  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  tinfo->file = syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0);
  if (tinfo->file < 0)
    goto err;

  /* We hard-code the trace buffer size.
     At some later time, we should make this configurable.  */
  tinfo->size = 1;
  /* Map the configuration page plus the trace pages, read-only; the
     kernel fills the buffer, we only consume it.  */
  tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
			PROT_READ, MAP_SHARED, tinfo->file, 0);
  if (tinfo->buffer == MAP_FAILED)
    goto err_file;

  return tinfo;

  /* goto-based cleanup: release resources acquired so far.  */
 err_file:
  close (tinfo->file);

 err:
  xfree (tinfo);
  return NULL;
}
494*ef5ccd6cSJohn Marino
495*ef5ccd6cSJohn Marino /* See linux-btrace.h. */
496*ef5ccd6cSJohn Marino
497*ef5ccd6cSJohn Marino int
linux_disable_btrace(struct btrace_target_info * tinfo)498*ef5ccd6cSJohn Marino linux_disable_btrace (struct btrace_target_info *tinfo)
499*ef5ccd6cSJohn Marino {
500*ef5ccd6cSJohn Marino int errcode;
501*ef5ccd6cSJohn Marino
502*ef5ccd6cSJohn Marino errno = 0;
503*ef5ccd6cSJohn Marino errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
504*ef5ccd6cSJohn Marino if (errcode != 0)
505*ef5ccd6cSJohn Marino return errno;
506*ef5ccd6cSJohn Marino
507*ef5ccd6cSJohn Marino close (tinfo->file);
508*ef5ccd6cSJohn Marino xfree (tinfo);
509*ef5ccd6cSJohn Marino
510*ef5ccd6cSJohn Marino return 0;
511*ef5ccd6cSJohn Marino }
512*ef5ccd6cSJohn Marino
513*ef5ccd6cSJohn Marino /* Check whether the branch trace has changed. */
514*ef5ccd6cSJohn Marino
515*ef5ccd6cSJohn Marino static int
linux_btrace_has_changed(struct btrace_target_info * tinfo)516*ef5ccd6cSJohn Marino linux_btrace_has_changed (struct btrace_target_info *tinfo)
517*ef5ccd6cSJohn Marino {
518*ef5ccd6cSJohn Marino volatile struct perf_event_mmap_page *header = perf_event_header (tinfo);
519*ef5ccd6cSJohn Marino
520*ef5ccd6cSJohn Marino return header->data_head != tinfo->data_head;
521*ef5ccd6cSJohn Marino }
522*ef5ccd6cSJohn Marino
523*ef5ccd6cSJohn Marino /* See linux-btrace.h. */
524*ef5ccd6cSJohn Marino
VEC(btrace_block_s)525*ef5ccd6cSJohn Marino VEC (btrace_block_s) *
526*ef5ccd6cSJohn Marino linux_read_btrace (struct btrace_target_info *tinfo,
527*ef5ccd6cSJohn Marino enum btrace_read_type type)
528*ef5ccd6cSJohn Marino {
529*ef5ccd6cSJohn Marino VEC (btrace_block_s) *btrace = NULL;
530*ef5ccd6cSJohn Marino volatile struct perf_event_mmap_page *header;
531*ef5ccd6cSJohn Marino const uint8_t *begin, *end, *start;
532*ef5ccd6cSJohn Marino unsigned long data_head, retries = 5;
533*ef5ccd6cSJohn Marino size_t buffer_size;
534*ef5ccd6cSJohn Marino
535*ef5ccd6cSJohn Marino if (type == btrace_read_new && !linux_btrace_has_changed (tinfo))
536*ef5ccd6cSJohn Marino return NULL;
537*ef5ccd6cSJohn Marino
538*ef5ccd6cSJohn Marino header = perf_event_header (tinfo);
539*ef5ccd6cSJohn Marino buffer_size = perf_event_buffer_size (tinfo);
540*ef5ccd6cSJohn Marino
541*ef5ccd6cSJohn Marino /* We may need to retry reading the trace. See below. */
542*ef5ccd6cSJohn Marino while (retries--)
543*ef5ccd6cSJohn Marino {
544*ef5ccd6cSJohn Marino data_head = header->data_head;
545*ef5ccd6cSJohn Marino
546*ef5ccd6cSJohn Marino /* If there's new trace, let's read it. */
547*ef5ccd6cSJohn Marino if (data_head != tinfo->data_head)
548*ef5ccd6cSJohn Marino {
549*ef5ccd6cSJohn Marino /* Data_head keeps growing; the buffer itself is circular. */
550*ef5ccd6cSJohn Marino begin = perf_event_buffer_begin (tinfo);
551*ef5ccd6cSJohn Marino start = begin + data_head % buffer_size;
552*ef5ccd6cSJohn Marino
553*ef5ccd6cSJohn Marino if (data_head <= buffer_size)
554*ef5ccd6cSJohn Marino end = start;
555*ef5ccd6cSJohn Marino else
556*ef5ccd6cSJohn Marino end = perf_event_buffer_end (tinfo);
557*ef5ccd6cSJohn Marino
558*ef5ccd6cSJohn Marino btrace = perf_event_read_bts (tinfo, begin, end, start);
559*ef5ccd6cSJohn Marino }
560*ef5ccd6cSJohn Marino
561*ef5ccd6cSJohn Marino /* The stopping thread notifies its ptracer before it is scheduled out.
562*ef5ccd6cSJohn Marino On multi-core systems, the debugger might therefore run while the
563*ef5ccd6cSJohn Marino kernel might be writing the last branch trace records.
564*ef5ccd6cSJohn Marino
565*ef5ccd6cSJohn Marino Let's check whether the data head moved while we read the trace. */
566*ef5ccd6cSJohn Marino if (data_head == header->data_head)
567*ef5ccd6cSJohn Marino break;
568*ef5ccd6cSJohn Marino }
569*ef5ccd6cSJohn Marino
570*ef5ccd6cSJohn Marino tinfo->data_head = data_head;
571*ef5ccd6cSJohn Marino
572*ef5ccd6cSJohn Marino return btrace;
573*ef5ccd6cSJohn Marino }
574*ef5ccd6cSJohn Marino
575*ef5ccd6cSJohn Marino #else /* !HAVE_LINUX_PERF_EVENT_H */
576*ef5ccd6cSJohn Marino
/* See linux-btrace.h.  */

int
linux_supports_btrace (void)
{
  /* Compiled without <linux/perf_event.h>; branch tracing is never
     available on this host.  */
  return 0;
}
584*ef5ccd6cSJohn Marino
/* See linux-btrace.h.

   Stub for hosts without <linux/perf_event.h>: branch tracing cannot
   be enabled, so signal failure by returning NULL.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
  return NULL;
}
592*ef5ccd6cSJohn Marino
/* See linux-btrace.h.  */

int
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  /* Nothing was ever enabled on hosts without <linux/perf_event.h>;
     report the operation as unsupported.  */
  return ENOSYS;
}
600*ef5ccd6cSJohn Marino
/* See linux-btrace.h.

   Stub for hosts without <linux/perf_event.h>: there is never any
   trace to read, so always return NULL.  */

VEC (btrace_block_s) *
linux_read_btrace (struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  return NULL;
}
609*ef5ccd6cSJohn Marino
610*ef5ccd6cSJohn Marino #endif /* !HAVE_LINUX_PERF_EVENT_H */
611