/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function * bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
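
/* An illustrative sketch (not from the original sources): the do ... while (0)
   wrapper lets DEBUG be used like an ordinary statement, even in dangling-else
   contexts such as

     if (errcode != 0)
       DEBUG ("error %d", errcode);
     else
       handle_success ();   (hypothetical function)

   With plain braces, the semicolon after the macro expansion would end the
   if statement and make the else branch a syntax error.  */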

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function* bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  return bfun;
}
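
/* A worked example (illustrative, not from the original sources): if the
   initial segment #1 holds three instructions, a segment created with
   PREV = #1 gets number 2 and insn_offset 1 + 3 = 4.  A gap segment
   contributes exactly one instruction to this count; see
   ftrace_call_num_insn above.  */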

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */
static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}
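
/* An illustrative example (hypothetical names): assume foo calls bar and bar
   tail-calls baz.  BAZ's up link then points to BAR with
   BFUN_UP_LINKS_TO_TAILCALL set while BAR's up link points to FOO via a
   regular call.  ftrace_get_caller applied to BAZ skips the tail call link
   and returns FOO, the function that will eventually see the return.  */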

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about
     the call stack; the best we can do right now is to preserve it.  */
  bfun = ftrace_new_function (prev, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    return ftrace_new_return (bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as a tail call if we're switching functions
	       and as an intra-function branch if we aren't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}
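
/* A worked example (illustrative, with hypothetical names): matching the back
   trace baz <- bar <- main against another back trace baz <- bar <- main
   yields 3.  If any pair of corresponding segments differs in its symbol
   information, the match is rejected as a whole and zero is returned.  */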

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  struct btrace_function *bfun, *end;
  int level;

  if (btinfo == NULL)
    return;

  bfun = btinfo->begin;
  if (bfun == NULL)
    return;

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     stop when we reach it; otherwise, we let the below loop run to the end.  */
  end = btinfo->end;
  if (VEC_length (btrace_insn_s, end->insn) > 1)
    end = NULL;

  level = INT_MAX;
  for (; bfun != end; bfun = bfun->flow.next)
    level = std::min (level, bfun->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
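
/* A worked example (illustrative, not from the original sources): for function
   levels -2, 0, and 1 the minimal level is -2, so the global level offset
   becomes 2 and the normalized levels start at zero.  */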

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (prev, next->up, next->flags);
	}
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (next, prev->up, prev->flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = next->up;
	  flags = next->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  ftrace_fixup_caller (next, prev->up, prev->flags);

	  for (prev = prev->up; prev != NULL; prev = prev->up)
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == NULL)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (prev, caller, flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (caller, prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}
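
/* An illustrative note (not from the original sources): every combination of
   a segment from LHS's back trace with a segment from RHS's back trace is
   tried, and the pair whose caller chains match longest wins.  Requiring
   MIN_MATCHES = 5 rejects short, accidental matches; lowering it trades
   confidence for bridging more gaps.  */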

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
	{
	  struct btrace_function *gap;
	  unsigned int idx;

	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
	    {
	      struct btrace_function *lhs, *rhs;
	      int bridged;

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = gap->flow.prev;
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
		if (rhs->errcode == 0)
		  break;

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		VEC_safe_push (bfun_s, remaining, gap);
	    }

	  /* Let's see if we made any progress.  */
	  if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
	    break;

	  VEC_free (bfun_s, *gaps);

	  *gaps = remaining;
	  remaining = NULL;
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
	break;

      VEC_free (bfun_s, remaining);
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
	      if (begin == NULL)
		begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
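
/* An illustrative note (not from the original sources): BTS delivers the most
   recent block first, which is why the loop above starts at the end of the
   block vector and counts BLK down to zero, replaying the blocks in
   chronological order and expanding each block [begin; end] one instruction
   at a time via gdb_insn_length.  */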

#if defined (HAVE_LIBIPT)

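/* Translate a libipt instruction class to the corresponding btrace
   instruction class.  */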
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  btrace_insn_flags flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
	       struct btrace_function **pbegin,
	       struct btrace_function **pend, int *plevel,
	       VEC (bfun_s) **gaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode;

  begin = *pbegin;
  end = *pend;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
	{
	  if (errcode != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
	  break;
	}

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
	{
	  errcode = pt_insn_next (decoder, &insn, sizeof(insn));
	  if (errcode < 0)
	    break;

	  /* Look for gaps in the trace - unless we're at the beginning.  */
	  if (begin != NULL)
	    {
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
	      if (insn.enabled)
		{
		  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

		  VEC_safe_push (bfun_s, *gaps, end);

		  pt_insn_get_offset (decoder, &offset);

		  warning (_("Non-contiguous trace at instruction %u (offset "
			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
			   end->insn_offset - 1, offset, insn.ip);
		}
	    }

	  /* Indicate trace overflows.  */
	  if (insn.resynced)
	    {
	      *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
	      if (begin == NULL)
		*pbegin = begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
			 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
		       offset, insn.ip);
	    }

	  upd = ftrace_update_function (end, insn.ip);
	  if (upd != end)
	    {
	      *pend = end = upd;

	      if (begin == NULL)
		*pbegin = begin = upd;
	    }

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, end->level);

	  btinsn.pc = (CORE_ADDR) insn.ip;
	  btinsn.size = (gdb_byte) insn.size;
	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
	  btinsn.flags = pt_btrace_insn_flags (&insn);

	  ftrace_update_insns (end, &btinsn);
	}

      if (errcode == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      if (begin == NULL)
	*pbegin = begin = end;

      VEC_safe_push (bfun_s, *gaps, end);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
	{
	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);

	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
	}

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */

/* Compute the function branch trace from a block branch trace BTRACE for
   the thread TP.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
			 VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

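/* Update TP's gap count and try to bridge the trace gaps in GAPS.  */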
static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

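/* Compute the function branch trace for TP from the branch trace data
   BTRACE, bridging any trace gaps we find.  */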
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_ptid (tp->ptid))
	btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
1589   if (VEC_empty (btrace_insn_s, last_bfun->insn))
1590     {
1591       VEC_pop (btrace_block_s, btrace->blocks);
1592       return 0;
1593     }
1594 
1595   /* Beware that block trace starts with the most recent block, so the
1596      chronologically first block in the new trace is the last block in
1597      the new trace's block vector.  */
1598   first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1599   last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1600 
1601   /* If the current PC at the end of the block is the same as in our current
1602      trace, there are two explanations:
1603        1. we executed the instruction and some branch brought us back.
1604        2. we have not made any progress.
1605      In the first case, the delta trace vector should contain at least two
1606      entries.
1607      In the second case, the delta trace vector should contain exactly one
1608      entry for the partial block containing the current PC.  Remove it.  */
1609   if (first_new_block->end == last_insn->pc
1610       && VEC_length (btrace_block_s, btrace->blocks) == 1)
1611     {
1612       VEC_pop (btrace_block_s, btrace->blocks);
1613       return 0;
1614     }
1615 
1616   DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1617 	 core_addr_to_string_nz (first_new_block->end));
1618 
1619   /* Do a simple sanity check to make sure we don't accidentally end up
1620      with a bad block.  This should not occur in practice.  */
1621   if (first_new_block->end < last_insn->pc)
1622     {
1623       warning (_("Error while trying to read delta trace.  Falling back to "
1624 		 "a full read."));
1625       return -1;
1626     }
1627 
1628   /* We adjust the last block to start at the end of our current trace.  */
1629   gdb_assert (first_new_block->begin == 0);
1630   first_new_block->begin = last_insn->pc;
1631 
1632   /* We simply pop the last insn so we can insert it again as part of
1633      the normal branch trace computation.
1634      Since instruction iterators are based on indices in the instructions
1635      vector, we don't leave any pointers dangling.  */
1636   DEBUG ("pruning insn at %s for stitching",
1637 	 ftrace_print_insn_addr (last_insn));
1638 
1639   VEC_pop (btrace_insn_s, last_bfun->insn);
1640 
1641   /* The instructions vector may become empty temporarily if this has
1642      been the only instruction in this function segment.
1643      This violates the invariant but will be remedied shortly by
1644      btrace_compute_ftrace when we add the new trace.  */
1645 
1646   /* The only case where this would hurt is if the entire trace consisted
1647      of just that one instruction.  If we remove it, we might turn the now
1648      empty btrace function segment into a gap.  But we don't want gaps at
1649      the beginning.  To avoid this, we remove the entire old trace.  */
1650   if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1651     btrace_clear (tp);
1652 
1653   return 0;
1654 }
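
/* A worked example (illustrative, not from the original sources): say the
   existing trace ends with an instruction at pc 0x1000 and the delta trace's
   chronologically first block is [0; 0x1040].  The block's begin is fixed up
   to 0x1000 and the instruction at 0x1000 is popped above so that
   btrace_compute_ftrace can re-add it as part of the new block.  */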

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1683 }
1684 
1685 /* Clear the branch trace histories in BTINFO.  */
1686 
1687 static void
1688 btrace_clear_history (struct btrace_thread_info *btinfo)
1689 {
1690   xfree (btinfo->insn_history);
1691   xfree (btinfo->call_history);
1692   xfree (btinfo->replay);
1693 
1694   btinfo->insn_history = NULL;
1695   btinfo->call_history = NULL;
1696   btinfo->replay = NULL;
1697 }
1698 
1699 /* Clear the branch trace maintenance histories in BTINFO.  */
1700 
1701 static void
1702 btrace_maint_clear (struct btrace_thread_info *btinfo)
1703 {
1704   switch (btinfo->data.format)
1705     {
1706     default:
1707       break;
1708 
1709     case BTRACE_FORMAT_BTS:
1710       btinfo->maint.variant.bts.packet_history.begin = 0;
1711       btinfo->maint.variant.bts.packet_history.end = 0;
1712       break;
1713 
1714 #if defined (HAVE_LIBIPT)
1715     case BTRACE_FORMAT_PT:
1716       xfree (btinfo->maint.variant.pt.packets);
1717 
1718       btinfo->maint.variant.pt.packets = NULL;
1719       btinfo->maint.variant.pt.packet_history.begin = 0;
1720       btinfo->maint.variant.pt.packet_history.end = 0;
1721       break;
1722 #endif /* defined (HAVE_LIBIPT)  */
1723     }
1724 }
1725 
1726 /* See btrace.h.  */
1727 
1728 const char *
1729 btrace_decode_error (enum btrace_format format, int errcode)
1730 {
1731   switch (format)
1732     {
1733     case BTRACE_FORMAT_BTS:
1734       switch (errcode)
1735 	{
1736 	case BDE_BTS_OVERFLOW:
1737 	  return _("instruction overflow");
1738 
1739 	case BDE_BTS_INSN_SIZE:
1740 	  return _("unknown instruction");
1741 
1742 	default:
1743 	  break;
1744 	}
1745       break;
1746 
1747 #if defined (HAVE_LIBIPT)
1748     case BTRACE_FORMAT_PT:
1749       switch (errcode)
1750 	{
1751 	case BDE_PT_USER_QUIT:
1752 	  return _("trace decode cancelled");
1753 
1754 	case BDE_PT_DISABLED:
1755 	  return _("disabled");
1756 
1757 	case BDE_PT_OVERFLOW:
1758 	  return _("overflow");
1759 
1760 	default:
1761 	  if (errcode < 0)
1762 	    return pt_errstr (pt_errcode (errcode));
1763 	  break;
1764 	}
1765       break;
1766 #endif /* defined (HAVE_LIBIPT)  */
1767 
1768     default:
1769       break;
1770     }
1771 
1772   return _("unknown");
1773 }
1774 
1775 /* See btrace.h.  */
1776 
1777 void
1778 btrace_fetch (struct thread_info *tp)
1779 {
1780   struct btrace_thread_info *btinfo;
1781   struct btrace_target_info *tinfo;
1782   struct btrace_data btrace;
1783   struct cleanup *cleanup;
1784   int errcode;
1785 
1786   DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1787 	 target_pid_to_str (tp->ptid));
1788 
1789   btinfo = &tp->btrace;
1790   tinfo = btinfo->target;
1791   if (tinfo == NULL)
1792     return;
1793 
1794   /* There's no way we could get new trace while replaying.
1795      On the other hand, delta trace would return a partial record with the
1796      current PC, which is the replay PC, not the last PC, as expected.  */
1797   if (btinfo->replay != NULL)
1798     return;
1799 
1800   /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
1801      can store a gdb.Record object in Python referring to a different thread
1802      than the current one, temporarily set INFERIOR_PTID.  */
1803   cleanup = save_inferior_ptid ();
1804   inferior_ptid = tp->ptid;
1805 
1806   /* We should not be called on running or exited threads.  */
1807   gdb_assert (can_access_registers_ptid (tp->ptid));
1808 
1809   btrace_data_init (&btrace);
1810   make_cleanup_btrace_data (&btrace);
1811 
1812   /* Let's first try to extend the trace we already have.  */
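       /* Overall read strategy: if we already have trace, try a delta read and
	  stitch it onto what we have; if that fails, try reading the entire
	  new trace; if that also fails, start over with a full read.  */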
1813   if (btinfo->end != NULL)
1814     {
1815       errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1816       if (errcode == 0)
1817 	{
1818 	  /* Success.  Let's try to stitch the traces together.  */
1819 	  errcode = btrace_stitch_trace (&btrace, tp);
1820 	}
1821       else
1822 	{
1823 	  /* We failed to read delta trace.  Let's try to read new trace.  */
1824 	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1825 
1826 	  /* If we got any new trace, discard what we have.  */
1827 	  if (errcode == 0 && !btrace_data_empty (&btrace))
1828 	    btrace_clear (tp);
1829 	}
1830 
1831       /* If we were not able to read the trace, we start over.  */
1832       if (errcode != 0)
1833 	{
1834 	  btrace_clear (tp);
1835 	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1836 	}
1837     }
1838   else
1839     errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1840 
1841   /* If we were not able to read the branch trace, signal an error.  */
1842   if (errcode != 0)
1843     error (_("Failed to read branch trace."));
1844 
1845   /* Compute the trace, provided we have any.  */
1846   if (!btrace_data_empty (&btrace))
1847     {
1848       struct btrace_function *bfun;
1849 
1850       /* Store the raw trace data.  The stored data will be cleared in
1851 	 btrace_clear, so we always append the new trace.  */
1852       btrace_data_append (&btinfo->data, &btrace);
1853       btrace_maint_clear (btinfo);
1854 
1855       VEC_truncate (btrace_fun_p, btinfo->functions, 0);
1856       btrace_clear_history (btinfo);
1857       btrace_compute_ftrace (tp, &btrace);
1858 
1859       for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
1860 	VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
1861     }
1862 
1863   do_cleanups (cleanup);
1864 }
1865 
1866 /* See btrace.h.  */
1867 
1868 void
1869 btrace_clear (struct thread_info *tp)
1870 {
1871   struct btrace_thread_info *btinfo;
1872   struct btrace_function *it, *trash;
1873 
1874   DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1875 	 target_pid_to_str (tp->ptid));
1876 
1877   /* Make sure btrace frames that may hold a pointer into the branch
1878      trace data are destroyed.  */
1879   reinit_frame_cache ();
1880 
1881   btinfo = &tp->btrace;
1882 
1883   VEC_free (btrace_fun_p, btinfo->functions);
1884 
1885   it = btinfo->begin;
1886   while (it != NULL)
1887     {
1888       trash = it;
1889       it = it->flow.next;
1890 
1891       xfree (trash);
1892     }
1893 
1894   btinfo->begin = NULL;
1895   btinfo->end = NULL;
1896   btinfo->ngaps = 0;
1897 
1898   /* Clear the maint data first - it depends on BTINFO->DATA.  */
1899   btrace_maint_clear (btinfo);
1900   btrace_data_clear (&btinfo->data);
1901   btrace_clear_history (btinfo);
1902 }
1903 
1904 /* See btrace.h.  */
1905 
1906 void
1907 btrace_free_objfile (struct objfile *objfile)
1908 {
1909   struct thread_info *tp;
1910 
1911   DEBUG ("free objfile");
1912 
1913   ALL_NON_EXITED_THREADS (tp)
1914     btrace_clear (tp);
1915 }
1916 
1917 #if defined (HAVE_LIBEXPAT)
1918 
1919 /* Check the btrace document version.  */
1920 
1921 static void
1922 check_xml_btrace_version (struct gdb_xml_parser *parser,
1923 			  const struct gdb_xml_element *element,
1924 			  void *user_data, VEC (gdb_xml_value_s) *attributes)
1925 {
1926   const char *version
1927     = (const char *) xml_find_attribute (attributes, "version")->value;
1928 
1929   if (strcmp (version, "1.0") != 0)
1930     gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1931 }
1932 
1933 /* Parse a btrace "block" xml record.  */
1934 
1935 static void
1936 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1937 			const struct gdb_xml_element *element,
1938 			void *user_data, VEC (gdb_xml_value_s) *attributes)
1939 {
1940   struct btrace_data *btrace;
1941   struct btrace_block *block;
1942   ULONGEST *begin, *end;
1943 
1944   btrace = (struct btrace_data *) user_data;
1945 
1946   switch (btrace->format)
1947     {
1948     case BTRACE_FORMAT_BTS:
1949       break;
1950 
1951     case BTRACE_FORMAT_NONE:
1952       btrace->format = BTRACE_FORMAT_BTS;
1953       btrace->variant.bts.blocks = NULL;
1954       break;
1955 
1956     default:
1957       gdb_xml_error (parser, _("Btrace format error."));
1958     }
1959 
1960   begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1961   end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1962 
1963   block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1964   block->begin = *begin;
1965   block->end = *end;
1966 }
1967 
1968 /* Parse a "raw" xml record.  */
1969 
1970 static void
1971 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1972 	       gdb_byte **pdata, size_t *psize)
1973 {
1974   struct cleanup *cleanup;
1975   gdb_byte *data, *bin;
1976   size_t len, size;
1977 
1978   len = strlen (body_text);
1979   if (len % 2 != 0)
1980     gdb_xml_error (parser, _("Bad raw data size."));
1981 
1982   size = len / 2;
1983 
1984   bin = data = (gdb_byte *) xmalloc (size);
1985   cleanup = make_cleanup (xfree, data);
1986 
1987   /* We use hex encoding - see common/rsp-low.h.  */
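       /* For example, the body text "0fa5" decodes to the two bytes
	  0x0f 0xa5.  */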
1988   while (len > 0)
1989     {
1990       char hi, lo;
1991 
1992       hi = *body_text++;
1993       lo = *body_text++;
1994 
1995       if (hi == 0 || lo == 0)
1996 	gdb_xml_error (parser, _("Bad hex encoding."));
1997 
1998       *bin++ = fromhex (hi) * 16 + fromhex (lo);
1999       len -= 2;
2000     }
2001 
2002   discard_cleanups (cleanup);
2003 
2004   *pdata = data;
2005   *psize = size;
2006 }
2007 
2008 /* Parse a btrace pt-config "cpu" xml record.  */
2009 
2010 static void
2011 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2012 				const struct gdb_xml_element *element,
2013 				void *user_data,
2014 				VEC (gdb_xml_value_s) *attributes)
2015 {
2016   struct btrace_data *btrace;
2017   const char *vendor;
2018   ULONGEST *family, *model, *stepping;
2019 
2020   vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2021   family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2022   model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2023   stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2024 
2025   btrace = (struct btrace_data *) user_data;
2026 
2027   if (strcmp (vendor, "GenuineIntel") == 0)
2028     btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2029 
2030   btrace->variant.pt.config.cpu.family = *family;
2031   btrace->variant.pt.config.cpu.model = *model;
2032   btrace->variant.pt.config.cpu.stepping = *stepping;
2033 }
2034 
2035 /* Parse a btrace pt "raw" xml record.  */
2036 
2037 static void
2038 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2039 			 const struct gdb_xml_element *element,
2040 			 void *user_data, const char *body_text)
2041 {
2042   struct btrace_data *btrace;
2043 
2044   btrace = (struct btrace_data *) user_data;
2045   parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2046 		 &btrace->variant.pt.size);
2047 }
2048 
2049 /* Parse a btrace "pt" xml record.  */
2050 
2051 static void
2052 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2053 		     const struct gdb_xml_element *element,
2054 		     void *user_data, VEC (gdb_xml_value_s) *attributes)
2055 {
2056   struct btrace_data *btrace;
2057 
2058   btrace = (struct btrace_data *) user_data;
2059   btrace->format = BTRACE_FORMAT_PT;
2060   btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2061   btrace->variant.pt.data = NULL;
2062   btrace->variant.pt.size = 0;
2063 }
2064 
2065 static const struct gdb_xml_attribute block_attributes[] = {
2066   { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2067   { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2068   { NULL, GDB_XML_AF_NONE, NULL, NULL }
2069 };
2070 
2071 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2072   { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2073   { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2074   { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2075   { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2076   { NULL, GDB_XML_AF_NONE, NULL, NULL }
2077 };
2078 
2079 static const struct gdb_xml_element btrace_pt_config_children[] = {
2080   { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2081     parse_xml_btrace_pt_config_cpu, NULL },
2082   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2083 };
2084 
2085 static const struct gdb_xml_element btrace_pt_children[] = {
2086   { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2087     NULL },
2088   { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2089   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2090 };
2091 
2092 static const struct gdb_xml_attribute btrace_attributes[] = {
2093   { "version", GDB_XML_AF_NONE, NULL, NULL },
2094   { NULL, GDB_XML_AF_NONE, NULL, NULL }
2095 };
2096 
2097 static const struct gdb_xml_element btrace_children[] = {
2098   { "block", block_attributes, NULL,
2099     GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2100   { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2101     NULL },
2102   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2103 };
2104 
2105 static const struct gdb_xml_element btrace_elements[] = {
2106   { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2107     check_xml_btrace_version, NULL },
2108   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2109 };
2110 
2111 #endif /* defined (HAVE_LIBEXPAT) */
2112 
2113 /* See btrace.h.  */
2114 
2115 void
2116 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2117 {
2118   struct cleanup *cleanup;
2119   int errcode;
2120 
2121 #if defined (HAVE_LIBEXPAT)
2122 
2123   btrace->format = BTRACE_FORMAT_NONE;
2124 
2125   cleanup = make_cleanup_btrace_data (btrace);
2126   errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2127 				 buffer, btrace);
2128   if (errcode != 0)
2129     error (_("Error parsing branch trace."));
2130 
2131   /* Keep parse results.  */
2132   discard_cleanups (cleanup);
2133 
2134 #else  /* !defined (HAVE_LIBEXPAT) */
2135 
2136   error (_("Cannot process branch trace.  XML parsing is not supported."));
2137 
2138 #endif  /* !defined (HAVE_LIBEXPAT) */
2139 }
2140 
2141 #if defined (HAVE_LIBEXPAT)
2142 
2143 /* Parse a btrace-conf "bts" xml record.  */
2144 
2145 static void
2146 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2147 			   const struct gdb_xml_element *element,
2148 			   void *user_data, VEC (gdb_xml_value_s) *attributes)
2149 {
2150   struct btrace_config *conf;
2151   struct gdb_xml_value *size;
2152 
2153   conf = (struct btrace_config *) user_data;
2154   conf->format = BTRACE_FORMAT_BTS;
2155   conf->bts.size = 0;
2156 
2157   size = xml_find_attribute (attributes, "size");
2158   if (size != NULL)
2159     conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2160 }
2161 
2162 /* Parse a btrace-conf "pt" xml record.  */
2163 
2164 static void
2165 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2166 			  const struct gdb_xml_element *element,
2167 			  void *user_data, VEC (gdb_xml_value_s) *attributes)
2168 {
2169   struct btrace_config *conf;
2170   struct gdb_xml_value *size;
2171 
2172   conf = (struct btrace_config *) user_data;
2173   conf->format = BTRACE_FORMAT_PT;
2174   conf->pt.size = 0;
2175 
2176   size = xml_find_attribute (attributes, "size");
2177   if (size != NULL)
2178     conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2179 }
2180 
2181 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2182   { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2183   { NULL, GDB_XML_AF_NONE, NULL, NULL }
2184 };
2185 
2186 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2187   { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2188   { NULL, GDB_XML_AF_NONE, NULL, NULL }
2189 };
2190 
2191 static const struct gdb_xml_element btrace_conf_children[] = {
2192   { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2193     parse_xml_btrace_conf_bts, NULL },
2194   { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2195     parse_xml_btrace_conf_pt, NULL },
2196   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2197 };
2198 
2199 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2200   { "version", GDB_XML_AF_NONE, NULL, NULL },
2201   { NULL, GDB_XML_AF_NONE, NULL, NULL }
2202 };
2203 
2204 static const struct gdb_xml_element btrace_conf_elements[] = {
2205   { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2206     GDB_XML_EF_NONE, NULL, NULL },
2207   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2208 };
2209 
2210 #endif /* defined (HAVE_LIBEXPAT) */
2211 
2212 /* See btrace.h.  */
2213 
2214 void
2215 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2216 {
2217   int errcode;
2218 
2219 #if defined (HAVE_LIBEXPAT)
2220 
2221   errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2222 				 btrace_conf_elements, xml, conf);
2223   if (errcode != 0)
2224     error (_("Error parsing branch trace configuration."));
2225 
2226 #else  /* !defined (HAVE_LIBEXPAT) */
2227 
2228   error (_("XML parsing is not supported."));
2229 
2230 #endif  /* !defined (HAVE_LIBEXPAT) */
2231 }
2232 
2233 /* See btrace.h.  */
2234 
2235 const struct btrace_insn *
2236 btrace_insn_get (const struct btrace_insn_iterator *it)
2237 {
2238   const struct btrace_function *bfun;
2239   unsigned int index, end;
2240 
2241   index = it->index;
2242   bfun = it->function;
2243 
2244   /* Check if the iterator points to a gap in the trace.  */
2245   if (bfun->errcode != 0)
2246     return NULL;
2247 
2248   /* The index is within the bounds of this function's instruction vector.  */
2249   end = VEC_length (btrace_insn_s, bfun->insn);
2250   gdb_assert (0 < end);
2251   gdb_assert (index < end);
2252 
2253   return VEC_index (btrace_insn_s, bfun->insn, index);
2254 }
2255 
2256 /* See btrace.h.  */
2257 
2258 int
2259 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2260 {
2261   return it->function->errcode;
2262 }
2263 
2264 /* See btrace.h.  */
2265 
2266 unsigned int
2267 btrace_insn_number (const struct btrace_insn_iterator *it)
2268 {
2269   return it->function->insn_offset + it->index;
2270 }
2271 
2272 /* See btrace.h.  */
2273 
2274 void
2275 btrace_insn_begin (struct btrace_insn_iterator *it,
2276 		   const struct btrace_thread_info *btinfo)
2277 {
2278   const struct btrace_function *bfun;
2279 
2280   bfun = btinfo->begin;
2281   if (bfun == NULL)
2282     error (_("No trace."));
2283 
2284   it->function = bfun;
2285   it->index = 0;
2286 }
2287 
2288 /* See btrace.h.  */
2289 
2290 void
2291 btrace_insn_end (struct btrace_insn_iterator *it,
2292 		 const struct btrace_thread_info *btinfo)
2293 {
2294   const struct btrace_function *bfun;
2295   unsigned int length;
2296 
2297   bfun = btinfo->end;
2298   if (bfun == NULL)
2299     error (_("No trace."));
2300 
2301   length = VEC_length (btrace_insn_s, bfun->insn);
2302 
2303   /* The last function may either be a gap or contain the current
2304      instruction, which is one past the end of the execution trace; ignore
2305      it.  */
2306   if (length > 0)
2307     length -= 1;
2308 
2309   it->function = bfun;
2310   it->index = length;
2311 }
2312 
2313 /* See btrace.h.  */
2314 
2315 unsigned int
2316 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2317 {
2318   const struct btrace_function *bfun;
2319   unsigned int index, steps;
2320 
2321   bfun = it->function;
2322   steps = 0;
2323   index = it->index;
2324 
2325   while (stride != 0)
2326     {
2327       unsigned int end, space, adv;
2328 
2329       end = VEC_length (btrace_insn_s, bfun->insn);
2330 
2331       /* An empty function segment represents a gap in the trace.  We count
2332 	 it as one instruction.  */
2333       if (end == 0)
2334 	{
2335 	  const struct btrace_function *next;
2336 
2337 	  next = bfun->flow.next;
2338 	  if (next == NULL)
2339 	    break;
2340 
2341 	  stride -= 1;
2342 	  steps += 1;
2343 
2344 	  bfun = next;
2345 	  index = 0;
2346 
2347 	  continue;
2348 	}
2349 
2350       gdb_assert (0 < end);
2351       gdb_assert (index < end);
2352 
2353       /* Compute the number of instructions remaining in this segment.  */
2354       space = end - index;
2355 
2356       /* Advance the iterator as far as possible within this segment.  */
2357       adv = std::min (space, stride);
2358       stride -= adv;
2359       index += adv;
2360       steps += adv;
2361 
2362       /* Move to the next function if we're at the end of this one.  */
2363       if (index == end)
2364 	{
2365 	  const struct btrace_function *next;
2366 
2367 	  next = bfun->flow.next;
2368 	  if (next == NULL)
2369 	    {
2370 	      /* We stepped past the last function.
2371 
2372 		 Let's adjust the index to point to the last instruction in
2373 		 the previous function.  */
2374 	      index -= 1;
2375 	      steps -= 1;
2376 	      break;
2377 	    }
2378 
2379 	  /* We now point to the first instruction in the new function.  */
2380 	  bfun = next;
2381 	  index = 0;
2382 	}
2383 
2384       /* We did make progress.  */
2385       gdb_assert (adv > 0);
2386     }
2387 
2388   /* Update the iterator.  */
2389   it->function = bfun;
2390   it->index = index;
2391 
2392   return steps;
2393 }
2394 
2395 /* See btrace.h.  */
2396 
2397 unsigned int
2398 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2399 {
2400   const struct btrace_function *bfun;
2401   unsigned int index, steps;
2402 
2403   bfun = it->function;
2404   steps = 0;
2405   index = it->index;
2406 
2407   while (stride != 0)
2408     {
2409       unsigned int adv;
2410 
2411       /* Move to the previous function if we're at the start of this one.  */
2412       if (index == 0)
2413 	{
2414 	  const struct btrace_function *prev;
2415 
2416 	  prev = bfun->flow.prev;
2417 	  if (prev == NULL)
2418 	    break;
2419 
2420 	  /* We point to one after the last instruction in the new function.  */
2421 	  bfun = prev;
2422 	  index = VEC_length (btrace_insn_s, bfun->insn);
2423 
2424 	  /* An empty function segment represents a gap in the trace.  We count
2425 	     it as one instruction.  */
2426 	  if (index == 0)
2427 	    {
2428 	      stride -= 1;
2429 	      steps += 1;
2430 
2431 	      continue;
2432 	    }
2433 	}
2434 
2435       /* Advance the iterator as far as possible within this segment.  */
2436       adv = std::min (index, stride);
2437 
2438       stride -= adv;
2439       index -= adv;
2440       steps += adv;
2441 
2442       /* We did make progress.  */
2443       gdb_assert (adv > 0);
2444     }
2445 
2446   /* Update the iterator.  */
2447   it->function = bfun;
2448   it->index = index;
2449 
2450   return steps;
2451 }
2452 
2453 /* See btrace.h.  */
2454 
2455 int
2456 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2457 		 const struct btrace_insn_iterator *rhs)
2458 {
2459   unsigned int lnum, rnum;
2460 
2461   lnum = btrace_insn_number (lhs);
2462   rnum = btrace_insn_number (rhs);
2463 
2464   return (int) (lnum - rnum);
2465 }
2466 
2467 /* See btrace.h.  */
2468 
2469 int
2470 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2471 			    const struct btrace_thread_info *btinfo,
2472 			    unsigned int number)
2473 {
2474   const struct btrace_function *bfun;
2475   unsigned int upper, lower;
2476 
2477   if (VEC_empty (btrace_fun_p, btinfo->functions))
2478     return 0;
2479 
2480   lower = 0;
2481   bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
2482   if (number < bfun->insn_offset)
2483     return 0;
2484 
2485   upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
2486   bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
2487   if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2488     return 0;
2489 
2490   /* We assume that there are no holes in the numbering.  */
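       /* Binary search: function segment I covers the instruction numbers
	  [INSN_OFFSET, INSN_OFFSET + ftrace_call_num_insn), and segments are
	  stored in ascending order of INSN_OFFSET.  */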
2491   for (;;)
2492     {
2493       const unsigned int average = lower + (upper - lower) / 2;
2494 
2495       bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
2496 
2497       if (number < bfun->insn_offset)
2498 	{
2499 	  upper = average - 1;
2500 	  continue;
2501 	}
2502 
2503       if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2504 	{
2505 	  lower = average + 1;
2506 	  continue;
2507 	}
2508 
2509       break;
2510     }
2511 
2512   it->function = bfun;
2513   it->index = number - bfun->insn_offset;
2514   return 1;
2515 }
2516 
2517 /* See btrace.h.  */
2518 
2519 const struct btrace_function *
2520 btrace_call_get (const struct btrace_call_iterator *it)
2521 {
2522   return it->function;
2523 }
2524 
2525 /* See btrace.h.  */
2526 
2527 unsigned int
2528 btrace_call_number (const struct btrace_call_iterator *it)
2529 {
2530   const struct btrace_thread_info *btinfo;
2531   const struct btrace_function *bfun;
2532   unsigned int insns;
2533 
2534   btinfo = it->btinfo;
2535   bfun = it->function;
2536   if (bfun != NULL)
2537     return bfun->number;
2538 
2539   /* For the end iterator, i.e. bfun == NULL, we return one more than the
2540      number of the last function.  */
2541   bfun = btinfo->end;
2542   insns = VEC_length (btrace_insn_s, bfun->insn);
2543 
2544   /* If the function contains only a single instruction (i.e. the current
2545      instruction), it will be skipped and its number is already the number
2546      we seek.  */
2547   if (insns == 1)
2548     return bfun->number;
2549 
2550   /* Otherwise, return one more than the number of the last function.  */
2551   return bfun->number + 1;
2552 }
2553 
2554 /* See btrace.h.  */
2555 
2556 void
2557 btrace_call_begin (struct btrace_call_iterator *it,
2558 		   const struct btrace_thread_info *btinfo)
2559 {
2560   const struct btrace_function *bfun;
2561 
2562   bfun = btinfo->begin;
2563   if (bfun == NULL)
2564     error (_("No trace."));
2565 
2566   it->btinfo = btinfo;
2567   it->function = bfun;
2568 }
2569 
2570 /* See btrace.h.  */
2571 
2572 void
2573 btrace_call_end (struct btrace_call_iterator *it,
2574 		 const struct btrace_thread_info *btinfo)
2575 {
2576   const struct btrace_function *bfun;
2577 
2578   bfun = btinfo->end;
2579   if (bfun == NULL)
2580     error (_("No trace."));
2581 
2582   it->btinfo = btinfo;
2583   it->function = NULL;
2584 }
2585 
2586 /* See btrace.h.  */
2587 
2588 unsigned int
2589 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2590 {
2591   const struct btrace_function *bfun;
2592   unsigned int steps;
2593 
2594   bfun = it->function;
2595   steps = 0;
2596   while (bfun != NULL)
2597     {
2598       const struct btrace_function *next;
2599       unsigned int insns;
2600 
2601       next = bfun->flow.next;
2602       if (next == NULL)
2603 	{
2604 	  /* Ignore the last function if it only contains a single
2605 	     (i.e. the current) instruction.  */
2606 	  insns = VEC_length (btrace_insn_s, bfun->insn);
2607 	  if (insns == 1)
2608 	    steps -= 1;
2609 	}
2610 
2611       if (stride == steps)
2612 	break;
2613 
2614       bfun = next;
2615       steps += 1;
2616     }
2617 
2618   it->function = bfun;
2619   return steps;
2620 }
2621 
2622 /* See btrace.h.  */
2623 
2624 unsigned int
2625 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2626 {
2627   const struct btrace_thread_info *btinfo;
2628   const struct btrace_function *bfun;
2629   unsigned int steps;
2630 
2631   bfun = it->function;
2632   steps = 0;
2633 
2634   if (bfun == NULL)
2635     {
2636       unsigned int insns;
2637 
2638       btinfo = it->btinfo;
2639       bfun = btinfo->end;
2640       if (bfun == NULL)
2641 	return 0;
2642 
2643       /* Ignore the last function if it only contains a single
2644 	 (i.e. the current) instruction.  */
2645       insns = VEC_length (btrace_insn_s, bfun->insn);
2646       if (insns == 1)
2647 	bfun = bfun->flow.prev;
2648 
2649       if (bfun == NULL)
2650 	return 0;
2651 
2652       steps += 1;
2653     }
2654 
2655   while (steps < stride)
2656     {
2657       const struct btrace_function *prev;
2658 
2659       prev = bfun->flow.prev;
2660       if (prev == NULL)
2661 	break;
2662 
2663       bfun = prev;
2664       steps += 1;
2665     }
2666 
2667   it->function = bfun;
2668   return steps;
2669 }
2670 
2671 /* See btrace.h.  */
2672 
2673 int
2674 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2675 		 const struct btrace_call_iterator *rhs)
2676 {
2677   unsigned int lnum, rnum;
2678 
2679   lnum = btrace_call_number (lhs);
2680   rnum = btrace_call_number (rhs);
2681 
2682   return (int) (lnum - rnum);
2683 }
2684 
2685 /* See btrace.h.  */
2686 
2687 int
2688 btrace_find_call_by_number (struct btrace_call_iterator *it,
2689 			    const struct btrace_thread_info *btinfo,
2690 			    unsigned int number)
2691 {
2692   const struct btrace_function *bfun;
2693 
2694   for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2695     {
2696       unsigned int bnum;
2697 
2698       bnum = bfun->number;
2699       if (number == bnum)
2700 	{
2701 	  it->btinfo = btinfo;
2702 	  it->function = bfun;
2703 	  return 1;
2704 	}
2705 
2706       /* Functions are ordered and numbered consecutively.  We could bail out
2707 	 earlier.  On the other hand, it is very unlikely that we search for
2708 	 a nonexistent function.  */
2709     }
2710 
2711   return 0;
2712 }
2713 
2714 /* See btrace.h.  */
2715 
2716 void
2717 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2718 			 const struct btrace_insn_iterator *begin,
2719 			 const struct btrace_insn_iterator *end)
2720 {
2721   if (btinfo->insn_history == NULL)
2722     btinfo->insn_history = XCNEW (struct btrace_insn_history);
2723 
2724   btinfo->insn_history->begin = *begin;
2725   btinfo->insn_history->end = *end;
2726 }
2727 
2728 /* See btrace.h.  */
2729 
2730 void
2731 btrace_set_call_history (struct btrace_thread_info *btinfo,
2732 			 const struct btrace_call_iterator *begin,
2733 			 const struct btrace_call_iterator *end)
2734 {
2735   gdb_assert (begin->btinfo == end->btinfo);
2736 
2737   if (btinfo->call_history == NULL)
2738     btinfo->call_history = XCNEW (struct btrace_call_history);
2739 
2740   btinfo->call_history->begin = *begin;
2741   btinfo->call_history->end = *end;
2742 }
2743 
2744 /* See btrace.h.  */
2745 
2746 int
2747 btrace_is_replaying (struct thread_info *tp)
2748 {
2749   return tp->btrace.replay != NULL;
2750 }
2751 
2752 /* See btrace.h.  */
2753 
2754 int
2755 btrace_is_empty (struct thread_info *tp)
2756 {
2757   struct btrace_insn_iterator begin, end;
2758   struct btrace_thread_info *btinfo;
2759 
2760   btinfo = &tp->btrace;
2761 
2762   if (btinfo->begin == NULL)
2763     return 1;
2764 
2765   btrace_insn_begin (&begin, btinfo);
2766   btrace_insn_end (&end, btinfo);
2767 
2768   return btrace_insn_cmp (&begin, &end) == 0;
2769 }
2770 
2771 /* Forward the cleanup request.  */
2772 
2773 static void
2774 do_btrace_data_cleanup (void *arg)
2775 {
2776   btrace_data_fini ((struct btrace_data *) arg);
2777 }
2778 
2779 /* See btrace.h.  */
2780 
2781 struct cleanup *
2782 make_cleanup_btrace_data (struct btrace_data *data)
2783 {
2784   return make_cleanup (do_btrace_data_cleanup, data);
2785 }
2786 
2787 #if defined (HAVE_LIBIPT)
2788 
2789 /* Print a single packet.  */
2790 
2791 static void
2792 pt_print_packet (const struct pt_packet *packet)
2793 {
2794   switch (packet->type)
2795     {
2796     default:
2797       printf_unfiltered (("[??: %x]"), packet->type);
2798       break;
2799 
2800     case ppt_psb:
2801       printf_unfiltered (("psb"));
2802       break;
2803 
2804     case ppt_psbend:
2805       printf_unfiltered (("psbend"));
2806       break;
2807 
2808     case ppt_pad:
2809       printf_unfiltered (("pad"));
2810       break;
2811 
2812     case ppt_tip:
2813       printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2814 			 packet->payload.ip.ipc,
2815 			 packet->payload.ip.ip);
2816       break;
2817 
2818     case ppt_tip_pge:
2819       printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2820 			 packet->payload.ip.ipc,
2821 			 packet->payload.ip.ip);
2822       break;
2823 
2824     case ppt_tip_pgd:
2825       printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2826 			 packet->payload.ip.ipc,
2827 			 packet->payload.ip.ip);
2828       break;
2829 
2830     case ppt_fup:
2831       printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2832 			 packet->payload.ip.ipc,
2833 			 packet->payload.ip.ip);
2834       break;
2835 
2836     case ppt_tnt_8:
2837       printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2838 			 packet->payload.tnt.bit_size,
2839 			 packet->payload.tnt.payload);
2840       break;
2841 
2842     case ppt_tnt_64:
2843       printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2844 			 packet->payload.tnt.bit_size,
2845 			 packet->payload.tnt.payload);
2846       break;
2847 
2848     case ppt_pip:
2849       printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2850 			 packet->payload.pip.nr ? (" nr") : (""));
2851       break;
2852 
2853     case ppt_tsc:
2854       printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2855       break;
2856 
2857     case ppt_cbr:
2858       printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2859       break;
2860 
2861     case ppt_mode:
2862       switch (packet->payload.mode.leaf)
2863 	{
2864 	default:
2865 	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2866 	  break;
2867 
2868 	case pt_mol_exec:
2869 	  printf_unfiltered (("mode.exec%s%s"),
2870 			     packet->payload.mode.bits.exec.csl
2871 			     ? (" cs.l") : (""),
2872 			     packet->payload.mode.bits.exec.csd
2873 			     ? (" cs.d") : (""));
2874 	  break;
2875 
2876 	case pt_mol_tsx:
2877 	  printf_unfiltered (("mode.tsx%s%s"),
2878 			     packet->payload.mode.bits.tsx.intx
2879 			     ? (" intx") : (""),
2880 			     packet->payload.mode.bits.tsx.abrt
2881 			     ? (" abrt") : (""));
2882 	  break;
2883 	}
2884       break;
2885 
2886     case ppt_ovf:
2887       printf_unfiltered (("ovf"));
2888       break;
2889 
2890     case ppt_stop:
2891       printf_unfiltered (("stop"));
2892       break;
2893 
2894     case ppt_vmcs:
2895       printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2896       break;
2897 
2898     case ppt_tma:
2899       printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2900 			 packet->payload.tma.fc);
2901       break;
2902 
2903     case ppt_mtc:
2904       printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2905       break;
2906 
2907     case ppt_cyc:
2908       printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2909       break;
2910 
2911     case ppt_mnt:
2912       printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2913       break;
2914     }
2915 }
2916 
2917 /* Decode packets into MAINT using DECODER.  */
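
     /* The decoder is repeatedly synchronized onto the trace stream (at PSB
        packets) and packets are read until the decoder reports an error.
        Errors other than end-of-stream are recorded as error packets so the
        packet history shows where decoding stopped.  */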
2918 
2919 static void
2920 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2921 			struct pt_packet_decoder *decoder)
2922 {
2923   int errcode;
2924 
2925   for (;;)
2926     {
2927       struct btrace_pt_packet packet;
2928 
2929       errcode = pt_pkt_sync_forward (decoder);
2930       if (errcode < 0)
2931 	break;
2932 
2933       for (;;)
2934 	{
2935 	  pt_pkt_get_offset (decoder, &packet.offset);
2936 
2937 	  errcode = pt_pkt_next (decoder, &packet.packet,
2938 				 sizeof (packet.packet));
2939 	  if (errcode < 0)
2940 	    break;
2941 
2942 	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2943 	    {
2944 	      packet.errcode = pt_errcode (errcode);
2945 	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2946 			     &packet);
2947 	    }
2948 	}
2949 
2950       if (errcode == -pte_eos)
2951 	break;
2952 
2953       packet.errcode = pt_errcode (errcode);
2954       VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2955 		     &packet);
2956 
2957       warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2958 	       packet.offset, pt_errstr (packet.errcode));
2959     }
2960 
2961   if (errcode != -pte_eos)
2962     warning (_("Failed to synchronize onto the Intel Processor Trace "
2963 	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
2964 }
2965 
2966 /* Update the packet history in BTINFO.  */
2967 
2968 static void
2969 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2970 {
2972   struct pt_packet_decoder *decoder;
2973   struct btrace_data_pt *pt;
2974   struct pt_config config;
2975   int errcode;
2976 
2977   pt = &btinfo->data.variant.pt;
2978 
2979   /* Nothing to do if there is no trace.  */
2980   if (pt->size == 0)
2981     return;
2982 
2983   memset (&config, 0, sizeof (config));
2984 
2985   config.size = sizeof (config);
2986   config.begin = pt->data;
2987   config.end = pt->data + pt->size;
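
       /* The decoder reads directly from the raw trace buffer - the data is
	  not copied, so PT->DATA must stay valid while the decoder is in
	  use.  */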
2988 
2989   config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2990   config.cpu.family = pt->config.cpu.family;
2991   config.cpu.model = pt->config.cpu.model;
2992   config.cpu.stepping = pt->config.cpu.stepping;
2993 
2994   errcode = pt_cpu_errata (&config.errata, &config.cpu);
2995   if (errcode < 0)
2996     error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2997 	   pt_errstr (pt_errcode (errcode)));
2998 
2999   decoder = pt_pkt_alloc_decoder (&config);
3000   if (decoder == NULL)
3001     error (_("Failed to allocate the Intel Processor Trace decoder."));
3002 
3003   TRY
3004     {
3005       btrace_maint_decode_pt (&btinfo->maint, decoder);
3006     }
3007   CATCH (except, RETURN_MASK_ALL)
3008     {
3009       pt_pkt_free_decoder (decoder);
3010 
3011       if (except.reason < 0)
3012 	throw_exception (except);
3013     }
3014   END_CATCH
3015 
3016   pt_pkt_free_decoder (decoder);
3017 }
3018 
3019 #endif /* defined (HAVE_LIBIPT)  */
3020 
3021 /* Update the packet maintenance information for BTINFO and store the
3022    low and high bounds into BEGIN and END, respectively.
3023    Store the current iterator state into FROM and TO.  */
3024 
3025 static void
3026 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3027 			     unsigned int *begin, unsigned int *end,
3028 			     unsigned int *from, unsigned int *to)
3029 {
3030   switch (btinfo->data.format)
3031     {
3032     default:
3033       *begin = 0;
3034       *end = 0;
3035       *from = 0;
3036       *to = 0;
3037       break;
3038 
3039     case BTRACE_FORMAT_BTS:
3040       /* Nothing to do - we operate directly on BTINFO->DATA.  */
3041       *begin = 0;
3042       *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3043       *from = btinfo->maint.variant.bts.packet_history.begin;
3044       *to = btinfo->maint.variant.bts.packet_history.end;
3045       break;
3046 
3047 #if defined (HAVE_LIBIPT)
3048     case BTRACE_FORMAT_PT:
3049       if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3050 	btrace_maint_update_pt_packets (btinfo);
3051 
3052       *begin = 0;
3053       *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3054       *from = btinfo->maint.variant.pt.packet_history.begin;
3055       *to = btinfo->maint.variant.pt.packet_history.end;
3056       break;
3057 #endif /* defined (HAVE_LIBIPT)  */
3058     }
3059 }
3060 
3061 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3062    update the current iterator position.  */
3063 
3064 static void
3065 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3066 			    unsigned int begin, unsigned int end)
3067 {
3068   switch (btinfo->data.format)
3069     {
3070     default:
3071       break;
3072 
3073     case BTRACE_FORMAT_BTS:
3074       {
3075 	VEC (btrace_block_s) *blocks;
3076 	unsigned int blk;
3077 
3078 	blocks = btinfo->data.variant.bts.blocks;
3079 	for (blk = begin; blk < end; ++blk)
3080 	  {
3081 	    const btrace_block_s *block;
3082 
3083 	    block = VEC_index (btrace_block_s, blocks, blk);
3084 
3085 	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3086 			       core_addr_to_string_nz (block->begin),
3087 			       core_addr_to_string_nz (block->end));
3088 	  }
3089 
3090 	btinfo->maint.variant.bts.packet_history.begin = begin;
3091 	btinfo->maint.variant.bts.packet_history.end = end;
3092       }
3093       break;
3094 
3095 #if defined (HAVE_LIBIPT)
3096     case BTRACE_FORMAT_PT:
3097       {
3098 	VEC (btrace_pt_packet_s) *packets;
3099 	unsigned int pkt;
3100 
3101 	packets = btinfo->maint.variant.pt.packets;
3102 	for (pkt = begin; pkt < end; ++pkt)
3103 	  {
3104 	    const struct btrace_pt_packet *packet;
3105 
3106 	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3107 
3108 	    printf_unfiltered ("%u\t", pkt);
3109 	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3110 
3111 	    if (packet->errcode == pte_ok)
3112 	      pt_print_packet (&packet->packet);
3113 	    else
3114 	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3115 
3116 	    printf_unfiltered ("\n");
3117 	  }
3118 
3119 	btinfo->maint.variant.pt.packet_history.begin = begin;
3120 	btinfo->maint.variant.pt.packet_history.end = end;
3121       }
3122       break;
3123 #endif /* defined (HAVE_LIBIPT)  */
3124     }
3125 }
3126 
3127 /* Read a number from an argument string.  */
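     /* For example, given " 12,+3", return 12 and advance *ARG past " 12" so
        it points at ",+3".  */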
3128 
3129 static unsigned int
3130 get_uint (char **arg)
3131 {
3132   char *begin, *end, *pos;
3133   unsigned long number;
3134 
3135   begin = *arg;
3136   pos = skip_spaces (begin);
3137 
3138   if (!isdigit (*pos))
3139     error (_("Expected positive number, got: %s."), pos);
3140 
3141   number = strtoul (pos, &end, 10);
3142   if (number > UINT_MAX)
3143     error (_("Number too big."));
3144 
3145   *arg += (end - begin);
3146 
3147   return (unsigned int) number;
3148 }
3149 
3150 /* Read a context size from an argument string.  */
3151 
3152 static int
3153 get_context_size (char **arg)
3154 {
3155   char *pos;
3156   int number;
3157 
3158   pos = skip_spaces (*arg);
3159 
3160   if (!isdigit (*pos))
3161     error (_("Expected positive number, got: %s."), pos);
3162 
3163   return strtol (pos, arg, 10);
3164 }
3165 
3166 /* Complain about junk at the end of an argument string.  */
3167 
3168 static void
3169 no_chunk (char *arg)
3170 {
3171   if (*arg != 0)
3172     error (_("Junk after argument: %s."), arg);
3173 }
3174 
3175 /* The "maintenance btrace packet-history" command.  */
3176 
3177 static void
3178 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3179 {
3180   struct btrace_thread_info *btinfo;
3181   struct thread_info *tp;
3182   unsigned int size, begin, end, from, to;
3183 
3184   tp = find_thread_ptid (inferior_ptid);
3185   if (tp == NULL)
3186     error (_("No thread."));
3187 
3188   size = 10;
3189   btinfo = &tp->btrace;
3190 
3191   btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3192   if (begin == end)
3193     {
3194       printf_unfiltered (_("No trace.\n"));
3195       return;
3196     }
3197 
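       /* Supported argument forms (see also the command's help text):
	    <none>, "+"   the next ten packets
	    "-"           the ten packets before the last print
	    "N"           ten packets starting at packet N
	    "N,M"         packets N up to and including M
	    "N,+K"        K packets starting at packet N
	    "N,-K"        K packets ending at packet N.  */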
3198   if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3199     {
3200       from = to;
3201 
3202       if (end - from < size)
3203 	size = end - from;
3204       to = from + size;
3205     }
3206   else if (strcmp (arg, "-") == 0)
3207     {
3208       to = from;
3209 
3210       if (to - begin < size)
3211 	size = to - begin;
3212       from = to - size;
3213     }
3214   else
3215     {
3216       from = get_uint (&arg);
3217       if (end <= from)
3218 	error (_("'%u' is out of range."), from);
3219 
3220       arg = skip_spaces (arg);
3221       if (*arg == ',')
3222 	{
3223 	  arg = skip_spaces (++arg);
3224 
3225 	  if (*arg == '+')
3226 	    {
3227 	      arg += 1;
3228 	      size = get_context_size (&arg);
3229 
3230 	      no_chunk (arg);
3231 
3232 	      if (end - from < size)
3233 		size = end - from;
3234 	      to = from + size;
3235 	    }
3236 	  else if (*arg == '-')
3237 	    {
3238 	      arg += 1;
3239 	      size = get_context_size (&arg);
3240 
3241 	      no_chunk (arg);
3242 
3243 	      /* Include the packet given as first argument.  */
3244 	      from += 1;
3245 	      to = from;
3246 
3247 	      if (to - begin < size)
3248 		size = to - begin;
3249 	      from = to - size;
3250 	    }
3251 	  else
3252 	    {
3253 	      to = get_uint (&arg);
3254 
3255 	      /* Include the packet at the second argument and silently
3256 		 truncate the range.  */
3257 	      if (to < end)
3258 		to += 1;
3259 	      else
3260 		to = end;
3261 
3262 	      no_chunk (arg);
3263 	    }
3264 	}
3265       else
3266 	{
3267 	  no_chunk (arg);
3268 
3269 	  if (end - from < size)
3270 	    size = end - from;
3271 	  to = from + size;
3272 	}
3273 
3274       dont_repeat ();
3275     }
3276 
3277   btrace_maint_print_packets (btinfo, from, to);
3278 }
3279 
3280 /* The "maintenance btrace clear-packet-history" command.  */
3281 
3282 static void
3283 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3284 {
3285   struct btrace_thread_info *btinfo;
3286   struct thread_info *tp;
3287 
3288   if (args != NULL && *args != 0)
3289     error (_("Invalid argument."));
3290 
3291   tp = find_thread_ptid (inferior_ptid);
3292   if (tp == NULL)
3293     error (_("No thread."));
3294 
3295   btinfo = &tp->btrace;
3296 
3297   /* Clear the maint data first - it depends on BTINFO->DATA.  */
3298   btrace_maint_clear (btinfo);
3299   btrace_data_clear (&btinfo->data);
3300 }
3301 
3302 /* The "maintenance btrace clear" command.  */
3303 
3304 static void
3305 maint_btrace_clear_cmd (char *args, int from_tty)
3306 {
3307   struct btrace_thread_info *btinfo;
3308   struct thread_info *tp;
3309 
3310   if (args != NULL && *args != 0)
3311     error (_("Invalid argument."));
3312 
3313   tp = find_thread_ptid (inferior_ptid);
3314   if (tp == NULL)
3315     error (_("No thread."));
3316 
3317   btrace_clear (tp);
3318 }
3319 
3320 /* The "maintenance btrace" command.  */
3321 
3322 static void
3323 maint_btrace_cmd (char *args, int from_tty)
3324 {
3325   help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3326 	     gdb_stdout);
3327 }
3328 
3329 /* The "maintenance set btrace" command.  */
3330 
3331 static void
3332 maint_btrace_set_cmd (char *args, int from_tty)
3333 {
3334   help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3335 	     gdb_stdout);
3336 }
3337 
3338 /* The "maintenance show btrace" command.  */
3339 
3340 static void
3341 maint_btrace_show_cmd (char *args, int from_tty)
3342 {
3343   help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3344 	     all_commands, gdb_stdout);
3345 }
3346 
3347 /* The "maintenance set btrace pt" command.  */
3348 
3349 static void
3350 maint_btrace_pt_set_cmd (char *args, int from_tty)
3351 {
3352   help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3353 	     all_commands, gdb_stdout);
3354 }
3355 
3356 /* The "maintenance show btrace pt" command.  */
3357 
3358 static void
3359 maint_btrace_pt_show_cmd (char *args, int from_tty)
3360 {
3361   help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3362 	     all_commands, gdb_stdout);
3363 }
3364 
3365 /* The "maintenance info btrace" command.  */
3366 
3367 static void
3368 maint_info_btrace_cmd (char *args, int from_tty)
3369 {
3370   struct btrace_thread_info *btinfo;
3371   struct thread_info *tp;
3372   const struct btrace_config *conf;
3373 
3374   if (args != NULL && *args != 0)
3375     error (_("Invalid argument."));
3376 
3377   tp = find_thread_ptid (inferior_ptid);
3378   if (tp == NULL)
3379     error (_("No thread."));
3380 
3381   btinfo = &tp->btrace;
3382 
3383   conf = btrace_conf (btinfo);
3384   if (conf == NULL)
3385     error (_("No btrace configuration."));
3386 
3387   printf_unfiltered (_("Format: %s.\n"),
3388 		     btrace_format_string (conf->format));
3389 
3390   switch (conf->format)
3391     {
3392     default:
3393       break;
3394 
3395     case BTRACE_FORMAT_BTS:
3396       printf_unfiltered (_("Number of packets: %u.\n"),
3397 			 VEC_length (btrace_block_s,
3398 				     btinfo->data.variant.bts.blocks));
3399       break;
3400 
3401 #if defined (HAVE_LIBIPT)
3402     case BTRACE_FORMAT_PT:
3403       {
3404 	struct pt_version version;
3405 
3406 	version = pt_library_version ();
3407 	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3408 			   version.minor, version.build,
3409 			   version.ext != NULL ? version.ext : "");
3410 
3411 	btrace_maint_update_pt_packets (btinfo);
3412 	printf_unfiltered (_("Number of packets: %u.\n"),
3413 			   VEC_length (btrace_pt_packet_s,
3414 				       btinfo->maint.variant.pt.packets));
3415       }
3416       break;
3417 #endif /* defined (HAVE_LIBIPT)  */
3418     }
3419 }
3420 
3421 /* The "maint show btrace pt skip-pad" show value function. */
3422 
3423 static void
3424 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3425 			       struct cmd_list_element *c,
3426 			       const char *value)
3427 {
3428   fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3429 }
3430 
3431 
3432 /* Initialize btrace maintenance commands.  */
3433 
3434 void _initialize_btrace (void);
3435 void
3436 _initialize_btrace (void)
3437 {
3438   add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3439 	   _("Info about branch tracing data."), &maintenanceinfolist);
3440 
3441   add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3442 		  _("Branch tracing maintenance commands."),
3443 		  &maint_btrace_cmdlist, "maintenance btrace ",
3444 		  0, &maintenancelist);
3445 
3446   add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3447 Set branch tracing specific variables."),
3448                   &maint_btrace_set_cmdlist, "maintenance set btrace ",
3449                   0, &maintenance_set_cmdlist);
3450 
3451   add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3452 Set Intel Processor Trace specific variables."),
3453                   &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3454                   0, &maint_btrace_set_cmdlist);
3455 
3456   add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3457 Show branch tracing specific variables."),
3458                   &maint_btrace_show_cmdlist, "maintenance show btrace ",
3459                   0, &maintenance_show_cmdlist);
3460 
3461   add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3462 Show Intel Processor Trace specific variables."),
3463                   &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3464                   0, &maint_btrace_show_cmdlist);
3465 
3466   add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3467 			   &maint_btrace_pt_skip_pad, _("\
3468 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3469 Show whether PAD packets should be skipped in the btrace packet history."),_("\
3470 When enabled, PAD packets are ignored in the btrace packet history."),
3471 			   NULL, show_maint_btrace_pt_skip_pad,
3472 			   &maint_btrace_pt_set_cmdlist,
3473 			   &maint_btrace_pt_show_cmdlist);
3474 
3475   add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3476 	   _("Print the raw branch tracing data.\n\
3477 With no argument, print ten more packets after the previous ten-line print.\n\
3478 With '-' as argument, print ten packets before the previous ten-line print.\n\
3479 One argument specifies the starting packet of a ten-line print.\n\
3480 Two arguments separated by a comma specify the starting and ending packets \
3481 to print.\n\
3482 Preceded by '+'/'-', the second argument specifies the distance from the \
3483 first.\n"),
3484 	   &maint_btrace_cmdlist);
3485 
3486   add_cmd ("clear-packet-history", class_maintenance,
3487 	   maint_btrace_clear_packet_history_cmd,
3488 	   _("Clears the branch tracing packet history.\n\
3489 Discards the raw branch tracing data but not the execution history data.\n\
3490 "),
3491 	   &maint_btrace_cmdlist);
3492 
3493   add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3494 	   _("Clears the branch tracing data.\n\
3495 Discards the raw branch tracing data and the execution history data.\n\
3496 The next 'record' command will fetch the branch tracing data anew.\n\
3497 "),
3498 	   &maint_btrace_cmdlist);
3499 
3500 }
3501