/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
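/* For example, DEBUG_FTRACE ("new call") prints "[btrace] [ftrace] new call"
   to gdb_stdlog, but only while record debugging is enabled via
   "set debug record 1".  */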

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}
/* Return non-zero if BFUN does not match MFUN and FUN,
   and zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}
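/* To illustrate the numbering: if the first three segments contain two,
   three, and one instruction, respectively, they get segment numbers 1, 2,
   and 3 and instruction offsets 1, 3, and 6.  Instructions are thus
   numbered globally, starting at one.  */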

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
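	  /* For instance, if the topmost known segment is at level 0, the
	     first return without a matching call ends up at level -1 and a
	     subsequent one at level -2; the global level offset computed by
	     our callers later normalizes the minimal level back to zero.  */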
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack PREV if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    return ftrace_new_return (bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used by
	     position-independent code (PIC) to obtain the current PC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.
	       If we can't determine the function for PC, we also treat the
	       jump at the end of the block as a tail call.  */
	    if (start == 0 || start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace - unless we're at the
		 beginning.  */
	      if (begin != NULL)
		{
		  warning (_("Recorded trace may be corrupted around %s."),
			   core_addr_to_string_nz (pc));

		  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
		  ngaps += 1;
		}
	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));

	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
	      ngaps += 1;

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
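  /* For example, if the segment levels ended up ranging over -2, -1, and 0,
     LEVEL is -2 and the global offset becomes 2, so the normalized levels
     read 0, 1, and 2.  */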
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  btrace_insn_flags flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
	       struct btrace_function **pbegin,
	       struct btrace_function **pend, int *plevel,
	       unsigned int *ngaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode, nerrors;

  begin = *pbegin;
  end = *pend;
  nerrors = 0;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
	{
	  if (errcode != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
	  break;
	}

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
	{
	  errcode = pt_insn_next (decoder, &insn, sizeof (insn));
	  if (errcode < 0)
	    break;

	  /* Look for gaps in the trace - unless we're at the beginning.  */
	  if (begin != NULL)
	    {
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
	      if (insn.enabled)
		*pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

	      /* Indicate trace overflows.  */
	      if (insn.resynced)
		*pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
	    }

	  upd = ftrace_update_function (end, insn.ip);
	  if (upd != end)
	    {
	      *pend = end = upd;

	      if (begin == NULL)
		*pbegin = begin = upd;
	    }

	  /* Maintain the function level offset.  */
	  *plevel = min (*plevel, end->level);

	  btinsn.pc = (CORE_ADDR) insn.ip;
	  btinsn.size = (gdb_byte) insn.size;
	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
	  btinsn.flags = pt_btrace_insn_flags (&insn);

	  ftrace_update_insns (end, &btinsn);
	}

      if (errcode == -pte_eos)
	break;

      /* If the gap is at the very beginning, we ignore it - we will have
	 less trace, but we won't have any holes in the trace.  */
      if (begin == NULL)
	continue;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Failed to decode Intel Processor Trace near trace "
		 "offset 0x%" PRIx64 " at recorded PC 0x%" PRIx64 ": %s."),
	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      *ngaps += 1;
      nerrors += 1;
    }

  if (nerrors > 0)
    warning (_("The recorded execution trace may have gaps."));
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */
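/* Per libipt's read-memory callback contract, we return the number of bytes
   read on success and a negative pt error code (here -pte_nomap) on
   failure.  */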

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
			   struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
				       NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
		     &btinfo->ngaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
	{
	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
	  btinfo->ngaps++;
	}

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */

/* Compute the function branch trace from a block branch trace BTRACE for
   thread TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */
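/* We synthesize a minimal BTS trace containing the single block [PC; PC];
   computing its ftrace appends exactly one instruction for the current
   PC.  */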

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
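  /* E.g. if the thread has not run since the last fetch, the delta trace
     consists of exactly one block ending at LAST_INSN->PC, and no new
     instructions need to be added.  */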
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before the trace data - the maint data
     depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
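/* A minimal sketch of the XML this accepts (hypothetical addresses):

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
     </btrace>  */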

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
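/* For example, the body text "0102ff" decodes into the three bytes 0x01,
   0x02, and 0xff.  */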

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}
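/* A minimal sketch of such a record (hypothetical values):

     <cpu vendor="GenuineIntel" family="6" model="63" stepping="2"/>  */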

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
			 const struct gdb_xml_element *element,
			 void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
		 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
		     const struct gdb_xml_element *element,
		     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}
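/* A minimal sketch of a btrace-conf document these handlers accept
   (hypothetical buffer size):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */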
1653 
1654 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
1655   { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1656   { NULL, GDB_XML_AF_NONE, NULL, NULL }
1657 };
1658 
1659 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1660   { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1661   { NULL, GDB_XML_AF_NONE, NULL, NULL }
1662 };
1663 
1664 static const struct gdb_xml_element btrace_conf_children[] = {
1665   { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1666     parse_xml_btrace_conf_bts, NULL },
1667   { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
1668     parse_xml_btrace_conf_pt, NULL },
1669   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1670 };
1671 
1672 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1673   { "version", GDB_XML_AF_NONE, NULL, NULL },
1674   { NULL, GDB_XML_AF_NONE, NULL, NULL }
1675 };
1676 
1677 static const struct gdb_xml_element btrace_conf_elements[] = {
1678   { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1679     GDB_XML_EF_NONE, NULL, NULL },
1680   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1681 };
1682 
1683 #endif /* defined (HAVE_LIBEXPAT) */
1684 
1685 /* See btrace.h.  */
1686 
1687 void
1688 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1689 {
1690   int errcode;
1691 
1692 #if defined (HAVE_LIBEXPAT)
1693 
1694   errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1695 				 btrace_conf_elements, xml, conf);
1696   if (errcode != 0)
1697     error (_("Error parsing branch trace configuration."));
1698 
1699 #else  /* !defined (HAVE_LIBEXPAT) */
1700 
1701   error (_("XML parsing is not supported."));
1702 
1703 #endif  /* !defined (HAVE_LIBEXPAT) */
1704 }
1705 
1706 /* See btrace.h.  */
1707 
1708 const struct btrace_insn *
1709 btrace_insn_get (const struct btrace_insn_iterator *it)
1710 {
1711   const struct btrace_function *bfun;
1712   unsigned int index, end;
1713 
1714   index = it->index;
1715   bfun = it->function;
1716 
1717   /* Check if the iterator points to a gap in the trace.  */
1718   if (bfun->errcode != 0)
1719     return NULL;
1720 
1721   /* The index is within the bounds of this function's instruction vector.  */
1722   end = VEC_length (btrace_insn_s, bfun->insn);
1723   gdb_assert (0 < end);
1724   gdb_assert (index < end);
1725 
1726   return VEC_index (btrace_insn_s, bfun->insn, index);
1727 }
1728 
1729 /* See btrace.h.  */
1730 
1731 unsigned int
1732 btrace_insn_number (const struct btrace_insn_iterator *it)
1733 {
1734   const struct btrace_function *bfun;
1735 
1736   bfun = it->function;
1737 
1738   /* Return zero if the iterator points to a gap in the trace.  */
1739   if (bfun->errcode != 0)
1740     return 0;
1741 
1742   return bfun->insn_offset + it->index;
1743 }
1744 
1745 /* See btrace.h.  */
1746 
1747 void
1748 btrace_insn_begin (struct btrace_insn_iterator *it,
1749 		   const struct btrace_thread_info *btinfo)
1750 {
1751   const struct btrace_function *bfun;
1752 
1753   bfun = btinfo->begin;
1754   if (bfun == NULL)
1755     error (_("No trace."));
1756 
1757   it->function = bfun;
1758   it->index = 0;
1759 }
1760 
1761 /* See btrace.h.  */
1762 
1763 void
1764 btrace_insn_end (struct btrace_insn_iterator *it,
1765 		 const struct btrace_thread_info *btinfo)
1766 {
1767   const struct btrace_function *bfun;
1768   unsigned int length;
1769 
1770   bfun = btinfo->end;
1771   if (bfun == NULL)
1772     error (_("No trace."));
1773 
1774   length = VEC_length (btrace_insn_s, bfun->insn);
1775 
1776   /* The last function may either be a gap or it contains the current
1777      instruction, which is one past the end of the execution trace; ignore
1778      it.  */
1779   if (length > 0)
1780     length -= 1;
1781 
1782   it->function = bfun;
1783   it->index = length;
1784 }
1785 
1786 /* See btrace.h.  */
1787 
1788 unsigned int
1789 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1790 {
1791   const struct btrace_function *bfun;
1792   unsigned int index, steps;
1793 
1794   bfun = it->function;
1795   steps = 0;
1796   index = it->index;
1797 
1798   while (stride != 0)
1799     {
1800       unsigned int end, space, adv;
1801 
1802       end = VEC_length (btrace_insn_s, bfun->insn);
1803 
1804       /* An empty function segment represents a gap in the trace.  We count
1805 	 it as one instruction.  */
1806       if (end == 0)
1807 	{
1808 	  const struct btrace_function *next;
1809 
1810 	  next = bfun->flow.next;
1811 	  if (next == NULL)
1812 	    break;
1813 
1814 	  stride -= 1;
1815 	  steps += 1;
1816 
1817 	  bfun = next;
1818 	  index = 0;
1819 
1820 	  continue;
1821 	}
1822 
1823       gdb_assert (0 < end);
1824       gdb_assert (index < end);
1825 
1826       /* Compute the number of instructions remaining in this segment.  */
1827       space = end - index;
1828 
1829       /* Advance the iterator as far as possible within this segment.  */
1830       adv = min (space, stride);
1831       stride -= adv;
1832       index += adv;
1833       steps += adv;
1834 
1835       /* Move to the next function if we're at the end of this one.  */
1836       if (index == end)
1837 	{
1838 	  const struct btrace_function *next;
1839 
1840 	  next = bfun->flow.next;
1841 	  if (next == NULL)
1842 	    {
1843 	      /* We stepped past the last function.
1844 
1845 		 Let's adjust the index to point back to the last
1846 		 instruction of the current (final) function segment.  */
1847 	      index -= 1;
1848 	      steps -= 1;
1849 	      break;
1850 	    }
1851 
1852 	  /* We now point to the first instruction in the new function.  */
1853 	  bfun = next;
1854 	  index = 0;
1855 	}
1856 
1857       /* We did make progress.  */
1858       gdb_assert (adv > 0);
1859     }
1860 
1861   /* Update the iterator.  */
1862   it->function = bfun;
1863   it->index = index;
1864 
1865   return steps;
1866 }
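
/* Example (sketch): callers can detect the end of the trace by comparing
   the requested STRIDE against the returned step count, e.g. for a valid
   iterator IT:

     unsigned int steps;

     steps = btrace_insn_next (&it, 100);
     if (steps < 100)
       handle_end_of_trace ();

   where handle_end_of_trace is a hypothetical callback.  */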
1867 
1868 /* See btrace.h.  */
1869 
1870 unsigned int
1871 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1872 {
1873   const struct btrace_function *bfun;
1874   unsigned int index, steps;
1875 
1876   bfun = it->function;
1877   steps = 0;
1878   index = it->index;
1879 
1880   while (stride != 0)
1881     {
1882       unsigned int adv;
1883 
1884       /* Move to the previous function if we're at the start of this one.  */
1885       if (index == 0)
1886 	{
1887 	  const struct btrace_function *prev;
1888 
1889 	  prev = bfun->flow.prev;
1890 	  if (prev == NULL)
1891 	    break;
1892 
1893 	  /* We point to one after the last instruction in the new function.  */
1894 	  bfun = prev;
1895 	  index = VEC_length (btrace_insn_s, bfun->insn);
1896 
1897 	  /* An empty function segment represents a gap in the trace.  We count
1898 	     it as one instruction.  */
1899 	  if (index == 0)
1900 	    {
1901 	      stride -= 1;
1902 	      steps += 1;
1903 
1904 	      continue;
1905 	    }
1906 	}
1907 
1908       /* Advance the iterator as far as possible within this segment.  */
1909       adv = min (index, stride);
1910 
1911       stride -= adv;
1912       index -= adv;
1913       steps += adv;
1914 
1915       /* We did make progress.  */
1916       gdb_assert (adv > 0);
1917     }
1918 
1919   /* Update the iterator.  */
1920   it->function = bfun;
1921   it->index = index;
1922 
1923   return steps;
1924 }
1925 
1926 /* See btrace.h.  */
1927 
1928 int
1929 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1930 		 const struct btrace_insn_iterator *rhs)
1931 {
1932   unsigned int lnum, rnum;
1933 
1934   lnum = btrace_insn_number (lhs);
1935   rnum = btrace_insn_number (rhs);
1936 
1937   /* A gap has an instruction number of zero.  Things get more complicated
1938      when gaps are involved.
1939 
1940      We take the instruction number offset from the iterator's function.
1941      This is the number of the first instruction after the gap.
1942 
1943      This works as long as both LHS and RHS point to gaps.  If only one of
1944      them does, we need to adjust the number based on the other's regular
1945      instruction number.  Otherwise, a gap might compare equal to an
1946      instruction.  */
1947 
1948   if (lnum == 0 && rnum == 0)
1949     {
1950       lnum = lhs->function->insn_offset;
1951       rnum = rhs->function->insn_offset;
1952     }
1953   else if (lnum == 0)
1954     {
1955       lnum = lhs->function->insn_offset;
1956 
1957       if (lnum == rnum)
1958 	lnum -= 1;
1959     }
1960   else if (rnum == 0)
1961     {
1962       rnum = rhs->function->insn_offset;
1963 
1964       if (rnum == lnum)
1965 	rnum -= 1;
1966     }
1967 
1968   return (int) (lnum - rnum);
1969 }
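
/* A concrete example of the adjustment above (illustrative): suppose LHS
   points to a gap whose function has insn_offset 10 and RHS points to the
   regular instruction number 10, i.e. the first instruction after that
   gap.  Without the adjustment, both numbers would be 10 and the gap would
   compare equal to the instruction; with it, LHS becomes 9 and correctly
   compares less than RHS.  */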
1970 
1971 /* See btrace.h.  */
1972 
1973 int
1974 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1975 			    const struct btrace_thread_info *btinfo,
1976 			    unsigned int number)
1977 {
1978   const struct btrace_function *bfun;
1979   unsigned int end, length;
1980 
1981   for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1982     {
1983       /* Skip gaps.  */
1984       if (bfun->errcode != 0)
1985 	continue;
1986 
1987       if (bfun->insn_offset <= number)
1988 	break;
1989     }
1990 
1991   if (bfun == NULL)
1992     return 0;
1993 
1994   length = VEC_length (btrace_insn_s, bfun->insn);
1995   gdb_assert (length > 0);
1996 
1997   end = bfun->insn_offset + length;
1998   if (end <= number)
1999     return 0;
2000 
2001   it->function = bfun;
2002   it->index = number - bfun->insn_offset;
2003 
2004   return 1;
2005 }
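
/* Usage sketch (illustrative): positioning an iterator at a given
   one-based instruction number, e.g. instruction 42 of thread TP:

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, &tp->btrace, 42) == 0)
       error (_("No such instruction."));

   The error message is hypothetical.  */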
2006 
2007 /* See btrace.h.  */
2008 
2009 const struct btrace_function *
2010 btrace_call_get (const struct btrace_call_iterator *it)
2011 {
2012   return it->function;
2013 }
2014 
2015 /* See btrace.h.  */
2016 
2017 unsigned int
2018 btrace_call_number (const struct btrace_call_iterator *it)
2019 {
2020   const struct btrace_thread_info *btinfo;
2021   const struct btrace_function *bfun;
2022   unsigned int insns;
2023 
2024   btinfo = it->btinfo;
2025   bfun = it->function;
2026   if (bfun != NULL)
2027     return bfun->number;
2028 
2029   /* For the end iterator, i.e. bfun == NULL, we return one more than the
2030      number of the last function.  */
2031   bfun = btinfo->end;
2032   insns = VEC_length (btrace_insn_s, bfun->insn);
2033 
2034   /* If the function contains only a single instruction (i.e. the current
2035      instruction), it will be skipped and its number is already the number
2036      we seek.  */
2037   if (insns == 1)
2038     return bfun->number;
2039 
2040   /* Otherwise, return one more than the number of the last function.  */
2041   return bfun->number + 1;
2042 }
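
/* For example (illustrative): if the last function segment has number 7,
   the end iterator yields 8, unless that segment contains only the current
   instruction, in which case the segment is skipped and the end iterator
   yields 7.  */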
2043 
2044 /* See btrace.h.  */
2045 
2046 void
2047 btrace_call_begin (struct btrace_call_iterator *it,
2048 		   const struct btrace_thread_info *btinfo)
2049 {
2050   const struct btrace_function *bfun;
2051 
2052   bfun = btinfo->begin;
2053   if (bfun == NULL)
2054     error (_("No trace."));
2055 
2056   it->btinfo = btinfo;
2057   it->function = bfun;
2058 }
2059 
2060 /* See btrace.h.  */
2061 
2062 void
2063 btrace_call_end (struct btrace_call_iterator *it,
2064 		 const struct btrace_thread_info *btinfo)
2065 {
2066   const struct btrace_function *bfun;
2067 
2068   bfun = btinfo->end;
2069   if (bfun == NULL)
2070     error (_("No trace."));
2071 
2072   it->btinfo = btinfo;
2073   it->function = NULL;
2074 }
2075 
2076 /* See btrace.h.  */
2077 
2078 unsigned int
2079 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2080 {
2081   const struct btrace_function *bfun;
2082   unsigned int steps;
2083 
2084   bfun = it->function;
2085   steps = 0;
2086   while (bfun != NULL)
2087     {
2088       const struct btrace_function *next;
2089       unsigned int insns;
2090 
2091       next = bfun->flow.next;
2092       if (next == NULL)
2093 	{
2094 	  /* Ignore the last function if it only contains a single
2095 	     (i.e. the current) instruction.  */
2096 	  insns = VEC_length (btrace_insn_s, bfun->insn);
2097 	  if (insns == 1)
2098 	    steps -= 1;
2099 	}
2100 
2101       if (stride == steps)
2102 	break;
2103 
2104       bfun = next;
2105       steps += 1;
2106     }
2107 
2108   it->function = bfun;
2109   return steps;
2110 }
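
/* Example (sketch): stepping the call iterator past the last function
   segment leaves IT->FUNCTION at NULL, the end-iterator representation
   also used by btrace_call_number above:

     btrace_call_next (&it, 1);
     if (btrace_call_get (&it) == NULL)
       printf_unfiltered ("end of call history\n");

   The message is purely illustrative.  */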
2111 
2112 /* See btrace.h.  */
2113 
2114 unsigned int
2115 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2116 {
2117   const struct btrace_thread_info *btinfo;
2118   const struct btrace_function *bfun;
2119   unsigned int steps;
2120 
2121   bfun = it->function;
2122   steps = 0;
2123 
2124   if (bfun == NULL)
2125     {
2126       unsigned int insns;
2127 
2128       btinfo = it->btinfo;
2129       bfun = btinfo->end;
2130       if (bfun == NULL)
2131 	return 0;
2132 
2133       /* Ignore the last function if it only contains a single
2134 	 (i.e. the current) instruction.  */
2135       insns = VEC_length (btrace_insn_s, bfun->insn);
2136       if (insns == 1)
2137 	bfun = bfun->flow.prev;
2138 
2139       if (bfun == NULL)
2140 	return 0;
2141 
2142       steps += 1;
2143     }
2144 
2145   while (steps < stride)
2146     {
2147       const struct btrace_function *prev;
2148 
2149       prev = bfun->flow.prev;
2150       if (prev == NULL)
2151 	break;
2152 
2153       bfun = prev;
2154       steps += 1;
2155     }
2156 
2157   it->function = bfun;
2158   return steps;
2159 }
2160 
2161 /* See btrace.h.  */
2162 
2163 int
2164 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2165 		 const struct btrace_call_iterator *rhs)
2166 {
2167   unsigned int lnum, rnum;
2168 
2169   lnum = btrace_call_number (lhs);
2170   rnum = btrace_call_number (rhs);
2171 
2172   return (int) (lnum - rnum);
2173 }
2174 
2175 /* See btrace.h.  */
2176 
2177 int
2178 btrace_find_call_by_number (struct btrace_call_iterator *it,
2179 			    const struct btrace_thread_info *btinfo,
2180 			    unsigned int number)
2181 {
2182   const struct btrace_function *bfun;
2183 
2184   for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2185     {
2186       unsigned int bnum;
2187 
2188       bnum = bfun->number;
2189       if (number == bnum)
2190 	{
2191 	  it->btinfo = btinfo;
2192 	  it->function = bfun;
2193 	  return 1;
2194 	}
2195 
2196       /* Functions are ordered and numbered consecutively.  We could bail out
2197 	 earlier.  On the other hand, it is very unlikely that we search for
2198 	 a nonexistent function.  */
2199     }
2200 
2201   return 0;
2202 }
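
/* The early bail-out mentioned above could look like this (sketch only):

     if (bnum < number)
       break;

   Function numbers only decrease while walking the FLOW.PREV chain, so a
   smaller number means the search cannot succeed anymore.  It is omitted
   since searching for a nonexistent function is rare.  */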
2203 
2204 /* See btrace.h.  */
2205 
2206 void
2207 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2208 			 const struct btrace_insn_iterator *begin,
2209 			 const struct btrace_insn_iterator *end)
2210 {
2211   if (btinfo->insn_history == NULL)
2212     btinfo->insn_history = XCNEW (struct btrace_insn_history);
2213 
2214   btinfo->insn_history->begin = *begin;
2215   btinfo->insn_history->end = *end;
2216 }
2217 
2218 /* See btrace.h.  */
2219 
2220 void
2221 btrace_set_call_history (struct btrace_thread_info *btinfo,
2222 			 const struct btrace_call_iterator *begin,
2223 			 const struct btrace_call_iterator *end)
2224 {
2225   gdb_assert (begin->btinfo == end->btinfo);
2226 
2227   if (btinfo->call_history == NULL)
2228     btinfo->call_history = XCNEW (struct btrace_call_history);
2229 
2230   btinfo->call_history->begin = *begin;
2231   btinfo->call_history->end = *end;
2232 }
2233 
2234 /* See btrace.h.  */
2235 
2236 int
2237 btrace_is_replaying (struct thread_info *tp)
2238 {
2239   return tp->btrace.replay != NULL;
2240 }
2241 
2242 /* See btrace.h.  */
2243 
2244 int
2245 btrace_is_empty (struct thread_info *tp)
2246 {
2247   struct btrace_insn_iterator begin, end;
2248   struct btrace_thread_info *btinfo;
2249 
2250   btinfo = &tp->btrace;
2251 
2252   if (btinfo->begin == NULL)
2253     return 1;
2254 
2255   btrace_insn_begin (&begin, btinfo);
2256   btrace_insn_end (&end, btinfo);
2257 
2258   return btrace_insn_cmp (&begin, &end) == 0;
2259 }
2260 
2261 /* Forward the cleanup request.  */
2262 
2263 static void
2264 do_btrace_data_cleanup (void *arg)
2265 {
2266   btrace_data_fini ((struct btrace_data *) arg);
2267 }
2268 
2269 /* See btrace.h.  */
2270 
2271 struct cleanup *
2272 make_cleanup_btrace_data (struct btrace_data *data)
2273 {
2274   return make_cleanup (do_btrace_data_cleanup, data);
2275 }
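
/* Usage sketch (illustrative): tying a btrace_data object to the cleanup
   chain so it is released on error paths, too:

     struct btrace_data data;
     struct cleanup *cleanup;

     btrace_data_init (&data);
     cleanup = make_cleanup_btrace_data (&data);

     ... fill and use DATA; a call to error () unwinds the cleanups ...

     do_cleanups (cleanup);

   This mirrors the pattern used by callers such as btrace_fetch.  */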
2276 
2277 #if defined (HAVE_LIBIPT)
2278 
2279 /* Print a single packet.  */
2280 
2281 static void
2282 pt_print_packet (const struct pt_packet *packet)
2283 {
2284   switch (packet->type)
2285     {
2286     default:
2287       printf_unfiltered (("[??: %x]"), packet->type);
2288       break;
2289 
2290     case ppt_psb:
2291       printf_unfiltered (("psb"));
2292       break;
2293 
2294     case ppt_psbend:
2295       printf_unfiltered (("psbend"));
2296       break;
2297 
2298     case ppt_pad:
2299       printf_unfiltered (("pad"));
2300       break;
2301 
2302     case ppt_tip:
2303       printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2304 			 packet->payload.ip.ipc,
2305 			 packet->payload.ip.ip);
2306       break;
2307 
2308     case ppt_tip_pge:
2309       printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2310 			 packet->payload.ip.ipc,
2311 			 packet->payload.ip.ip);
2312       break;
2313 
2314     case ppt_tip_pgd:
2315       printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2316 			 packet->payload.ip.ipc,
2317 			 packet->payload.ip.ip);
2318       break;
2319 
2320     case ppt_fup:
2321       printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2322 			 packet->payload.ip.ipc,
2323 			 packet->payload.ip.ip);
2324       break;
2325 
2326     case ppt_tnt_8:
2327       printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2328 			 packet->payload.tnt.bit_size,
2329 			 packet->payload.tnt.payload);
2330       break;
2331 
2332     case ppt_tnt_64:
2333       printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2334 			 packet->payload.tnt.bit_size,
2335 			 packet->payload.tnt.payload);
2336       break;
2337 
2338     case ppt_pip:
2339       printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2340 			 packet->payload.pip.nr ? (" nr") : (""));
2341       break;
2342 
2343     case ppt_tsc:
2344       printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2345       break;
2346 
2347     case ppt_cbr:
2348       printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2349       break;
2350 
2351     case ppt_mode:
2352       switch (packet->payload.mode.leaf)
2353 	{
2354 	default:
2355 	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2356 	  break;
2357 
2358 	case pt_mol_exec:
2359 	  printf_unfiltered (("mode.exec%s%s"),
2360 			     packet->payload.mode.bits.exec.csl
2361 			     ? (" cs.l") : (""),
2362 			     packet->payload.mode.bits.exec.csd
2363 			     ? (" cs.d") : (""));
2364 	  break;
2365 
2366 	case pt_mol_tsx:
2367 	  printf_unfiltered (("mode.tsx%s%s"),
2368 			     packet->payload.mode.bits.tsx.intx
2369 			     ? (" intx") : (""),
2370 			     packet->payload.mode.bits.tsx.abrt
2371 			     ? (" abrt") : (""));
2372 	  break;
2373 	}
2374       break;
2375 
2376     case ppt_ovf:
2377       printf_unfiltered (("ovf"));
2378       break;
2379 
2380     case ppt_stop:
2381       printf_unfiltered (("stop"));
2382       break;
2383 
2384     case ppt_vmcs:
2385       printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2386       break;
2387 
2388     case ppt_tma:
2389       printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2390 			 packet->payload.tma.fc);
2391       break;
2392 
2393     case ppt_mtc:
2394       printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2395       break;
2396 
2397     case ppt_cyc:
2398       printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2399       break;
2400 
2401     case ppt_mnt:
2402       printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2403       break;
2404     }
2405 }
2406 
2407 /* Decode packets into MAINT using DECODER.  */
2408 
2409 static void
2410 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2411 			struct pt_packet_decoder *decoder)
2412 {
2413   int errcode;
2414 
2415   for (;;)
2416     {
2417       struct btrace_pt_packet packet;
2418 
2419       errcode = pt_pkt_sync_forward (decoder);
2420       if (errcode < 0)
2421 	break;
2422 
2423       for (;;)
2424 	{
2425 	  pt_pkt_get_offset (decoder, &packet.offset);
2426 
2427 	  errcode = pt_pkt_next (decoder, &packet.packet,
2428 				 sizeof (packet.packet));
2429 	  if (errcode < 0)
2430 	    break;
2431 
2432 	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2433 	    {
2434 	      packet.errcode = pt_errcode (errcode);
2435 	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2436 			     &packet);
2437 	    }
2438 	}
2439 
2440       if (errcode == -pte_eos)
2441 	break;
2442 
2443       packet.errcode = pt_errcode (errcode);
2444       VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2445 		     &packet);
2446 
2447       warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2448 	       packet.offset, pt_errstr (packet.errcode));
2449     }
2450 
2451   if (errcode != -pte_eos)
2452     warning (_("Failed to synchronize onto the Intel Processor Trace "
2453 	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
2454 }
2455 
2456 /* Update the packet history in BTINFO.  */
2457 
2458 static void
2459 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2460 {
2462   struct pt_packet_decoder *decoder;
2463   struct btrace_data_pt *pt;
2464   struct pt_config config;
2465   int errcode;
2466 
2467   pt = &btinfo->data.variant.pt;
2468 
2469   /* Nothing to do if there is no trace.  */
2470   if (pt->size == 0)
2471     return;
2472 
2473   memset (&config, 0, sizeof (config));
2474 
2475   config.size = sizeof (config);
2476   config.begin = pt->data;
2477   config.end = pt->data + pt->size;
2478 
2479   config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2480   config.cpu.family = pt->config.cpu.family;
2481   config.cpu.model = pt->config.cpu.model;
2482   config.cpu.stepping = pt->config.cpu.stepping;
2483 
2484   errcode = pt_cpu_errata (&config.errata, &config.cpu);
2485   if (errcode < 0)
2486     error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2487 	   pt_errstr (pt_errcode (errcode)));
2488 
2489   decoder = pt_pkt_alloc_decoder (&config);
2490   if (decoder == NULL)
2491     error (_("Failed to allocate the Intel Processor Trace decoder."));
2492 
2493   TRY
2494     {
2495       btrace_maint_decode_pt (&btinfo->maint, decoder);
2496     }
2497   CATCH (except, RETURN_MASK_ALL)
2498     {
2499       pt_pkt_free_decoder (decoder);
2500 
2501       if (except.reason < 0)
2502 	throw_exception (except);
2503     }
2504   END_CATCH
2505 
2506   pt_pkt_free_decoder (decoder);
2507 }
2508 
2509 #endif /* defined (HAVE_LIBIPT)  */
2510 
2511 /* Update the packet maintenance information for BTINFO and store the
2512    low and high bounds into BEGIN and END, respectively.
2513    Store the current iterator state into FROM and TO.  */
2514 
2515 static void
2516 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2517 			     unsigned int *begin, unsigned int *end,
2518 			     unsigned int *from, unsigned int *to)
2519 {
2520   switch (btinfo->data.format)
2521     {
2522     default:
2523       *begin = 0;
2524       *end = 0;
2525       *from = 0;
2526       *to = 0;
2527       break;
2528 
2529     case BTRACE_FORMAT_BTS:
2530       /* Nothing to do - we operate directly on BTINFO->DATA.  */
2531       *begin = 0;
2532       *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2533       *from = btinfo->maint.variant.bts.packet_history.begin;
2534       *to = btinfo->maint.variant.bts.packet_history.end;
2535       break;
2536 
2537 #if defined (HAVE_LIBIPT)
2538     case BTRACE_FORMAT_PT:
2539       if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2540 	btrace_maint_update_pt_packets (btinfo);
2541 
2542       *begin = 0;
2543       *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2544       *from = btinfo->maint.variant.pt.packet_history.begin;
2545       *to = btinfo->maint.variant.pt.packet_history.end;
2546       break;
2547 #endif /* defined (HAVE_LIBIPT)  */
2548     }
2549 }
2550 
2551 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2552    update the current iterator position.  */
2553 
2554 static void
2555 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2556 			    unsigned int begin, unsigned int end)
2557 {
2558   switch (btinfo->data.format)
2559     {
2560     default:
2561       break;
2562 
2563     case BTRACE_FORMAT_BTS:
2564       {
2565 	VEC (btrace_block_s) *blocks;
2566 	unsigned int blk;
2567 
2568 	blocks = btinfo->data.variant.bts.blocks;
2569 	for (blk = begin; blk < end; ++blk)
2570 	  {
2571 	    const btrace_block_s *block;
2572 
2573 	    block = VEC_index (btrace_block_s, blocks, blk);
2574 
2575 	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2576 			       core_addr_to_string_nz (block->begin),
2577 			       core_addr_to_string_nz (block->end));
2578 	  }
2579 
2580 	btinfo->maint.variant.bts.packet_history.begin = begin;
2581 	btinfo->maint.variant.bts.packet_history.end = end;
2582       }
2583       break;
2584 
2585 #if defined (HAVE_LIBIPT)
2586     case BTRACE_FORMAT_PT:
2587       {
2588 	VEC (btrace_pt_packet_s) *packets;
2589 	unsigned int pkt;
2590 
2591 	packets = btinfo->maint.variant.pt.packets;
2592 	for (pkt = begin; pkt < end; ++pkt)
2593 	  {
2594 	    const struct btrace_pt_packet *packet;
2595 
2596 	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2597 
2598 	    printf_unfiltered ("%u\t", pkt);
2599 	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2600 
2601 	    if (packet->errcode == pte_ok)
2602 	      pt_print_packet (&packet->packet);
2603 	    else
2604 	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2605 
2606 	    printf_unfiltered ("\n");
2607 	  }
2608 
2609 	btinfo->maint.variant.pt.packet_history.begin = begin;
2610 	btinfo->maint.variant.pt.packet_history.end = end;
2611       }
2612       break;
2613 #endif /* defined (HAVE_LIBIPT)  */
2614     }
2615 }
2616 
2617 /* Read a number from an argument string.  */
2618 
2619 static unsigned int
2620 get_uint (char **arg)
2621 {
2622   char *begin, *end, *pos;
2623   unsigned long number;
2624 
2625   begin = *arg;
2626   pos = skip_spaces (begin);
2627 
2628   if (!isdigit (*pos))
2629     error (_("Expected positive number, got: %s."), pos);
2630 
2631   number = strtoul (pos, &end, 10);
2632   if (number > UINT_MAX)
2633     error (_("Number too big."));
2634 
2635   *arg += (end - begin);
2636 
2637   return (unsigned int) number;
2638 }
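
/* For example (illustrative): with *ARG pointing at "  12,+3", get_uint
   skips the spaces, parses 12, and advances *ARG to point at ",+3" for
   the caller to continue parsing.  */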
2639 
2640 /* Read a context size from an argument string.  */
2641 
2642 static int
2643 get_context_size (char **arg)
2644 {
2645   char *pos;
2646   int number;
2647 
2648   pos = skip_spaces (*arg);
2649 
2650   if (!isdigit (*pos))
2651     error (_("Expected positive number, got: %s."), pos);
2652 
2653   return strtol (pos, arg, 10);
2654 }
2655 
2656 /* Complain about junk at the end of an argument string.  */
2657 
2658 static void
2659 no_chunk (char *arg)
2660 {
2661   if (*arg != 0)
2662     error (_("Junk after argument: %s."), arg);
2663 }
2664 
2665 /* The "maintenance btrace packet-history" command.  */
2666 
2667 static void
2668 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2669 {
2670   struct btrace_thread_info *btinfo;
2671   struct thread_info *tp;
2672   unsigned int size, begin, end, from, to;
2673 
2674   tp = find_thread_ptid (inferior_ptid);
2675   if (tp == NULL)
2676     error (_("No thread."));
2677 
2678   size = 10;
2679   btinfo = &tp->btrace;
2680 
2681   btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2682   if (begin == end)
2683     {
2684       printf_unfiltered (_("No trace.\n"));
2685       return;
2686     }
2687 
2688   if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2689     {
2690       from = to;
2691 
2692       if (end - from < size)
2693 	size = end - from;
2694       to = from + size;
2695     }
2696   else if (strcmp (arg, "-") == 0)
2697     {
2698       to = from;
2699 
2700       if (to - begin < size)
2701 	size = to - begin;
2702       from = to - size;
2703     }
2704   else
2705     {
2706       from = get_uint (&arg);
2707       if (end <= from)
2708 	error (_("'%u' is out of range."), from);
2709 
2710       arg = skip_spaces (arg);
2711       if (*arg == ',')
2712 	{
2713 	  arg = skip_spaces (++arg);
2714 
2715 	  if (*arg == '+')
2716 	    {
2717 	      arg += 1;
2718 	      size = get_context_size (&arg);
2719 
2720 	      no_chunk (arg);
2721 
2722 	      if (end - from < size)
2723 		size = end - from;
2724 	      to = from + size;
2725 	    }
2726 	  else if (*arg == '-')
2727 	    {
2728 	      arg += 1;
2729 	      size = get_context_size (&arg);
2730 
2731 	      no_chunk (arg);
2732 
2733 	      /* Include the packet given as the first argument.  */
2734 	      from += 1;
2735 	      to = from;
2736 
2737 	      if (to - begin < size)
2738 		size = to - begin;
2739 	      from = to - size;
2740 	    }
2741 	  else
2742 	    {
2743 	      to = get_uint (&arg);
2744 
2745 	      /* Include the packet given as the second argument and silently
2746 		 truncate the range.  */
2747 	      if (to < end)
2748 		to += 1;
2749 	      else
2750 		to = end;
2751 
2752 	      no_chunk (arg);
2753 	    }
2754 	}
2755       else
2756 	{
2757 	  no_chunk (arg);
2758 
2759 	  if (end - from < size)
2760 	    size = end - from;
2761 	  to = from + size;
2762 	}
2763 
2764       dont_repeat ();
2765     }
2766 
2767   btrace_maint_print_packets (btinfo, from, to);
2768 }
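
/* Example invocations (derived from the help text installed in
   _initialize_btrace below):

     (gdb) maintenance btrace packet-history         -- the next ten packets
     (gdb) maintenance btrace packet-history -       -- the previous ten
     (gdb) maintenance btrace packet-history 10      -- ten packets from 10
     (gdb) maintenance btrace packet-history 10,20   -- packets 10 to 20
     (gdb) maintenance btrace packet-history 10,+5   -- five packets from 10
*/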
2769 
2770 /* The "maintenance btrace clear-packet-history" command.  */
2771 
2772 static void
2773 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2774 {
2775   struct btrace_thread_info *btinfo;
2776   struct thread_info *tp;
2777 
2778   if (args != NULL && *args != 0)
2779     error (_("Invalid argument."));
2780 
2781   tp = find_thread_ptid (inferior_ptid);
2782   if (tp == NULL)
2783     error (_("No thread."));
2784 
2785   btinfo = &tp->btrace;
2786 
2787   /* The maint data must be cleared first; it depends on BTINFO->DATA.  */
2788   btrace_maint_clear (btinfo);
2789   btrace_data_clear (&btinfo->data);
2790 }
2791 
2792 /* The "maintenance btrace clear" command.  */
2793 
2794 static void
2795 maint_btrace_clear_cmd (char *args, int from_tty)
2796 {
2797   struct btrace_thread_info *btinfo;
2798   struct thread_info *tp;
2799 
2800   if (args != NULL && *args != 0)
2801     error (_("Invalid argument."));
2802 
2803   tp = find_thread_ptid (inferior_ptid);
2804   if (tp == NULL)
2805     error (_("No thread."));
2806 
2807   btrace_clear (tp);
2808 }
2809 
2810 /* The "maintenance btrace" command.  */
2811 
2812 static void
2813 maint_btrace_cmd (char *args, int from_tty)
2814 {
2815   help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2816 	     gdb_stdout);
2817 }
2818 
2819 /* The "maintenance set btrace" command.  */
2820 
2821 static void
2822 maint_btrace_set_cmd (char *args, int from_tty)
2823 {
2824   help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2825 	     gdb_stdout);
2826 }
2827 
2828 /* The "maintenance show btrace" command.  */
2829 
2830 static void
2831 maint_btrace_show_cmd (char *args, int from_tty)
2832 {
2833   help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2834 	     all_commands, gdb_stdout);
2835 }
2836 
2837 /* The "maintenance set btrace pt" command.  */
2838 
2839 static void
2840 maint_btrace_pt_set_cmd (char *args, int from_tty)
2841 {
2842   help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2843 	     all_commands, gdb_stdout);
2844 }
2845 
2846 /* The "maintenance show btrace pt" command.  */
2847 
2848 static void
2849 maint_btrace_pt_show_cmd (char *args, int from_tty)
2850 {
2851   help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2852 	     all_commands, gdb_stdout);
2853 }
2854 
2855 /* The "maintenance info btrace" command.  */
2856 
2857 static void
2858 maint_info_btrace_cmd (char *args, int from_tty)
2859 {
2860   struct btrace_thread_info *btinfo;
2861   struct thread_info *tp;
2862   const struct btrace_config *conf;
2863 
2864   if (args != NULL && *args != 0)
2865     error (_("Invalid argument."));
2866 
2867   tp = find_thread_ptid (inferior_ptid);
2868   if (tp == NULL)
2869     error (_("No thread."));
2870 
2871   btinfo = &tp->btrace;
2872 
2873   conf = btrace_conf (btinfo);
2874   if (conf == NULL)
2875     error (_("No btrace configuration."));
2876 
2877   printf_unfiltered (_("Format: %s.\n"),
2878 		     btrace_format_string (conf->format));
2879 
2880   switch (conf->format)
2881     {
2882     default:
2883       break;
2884 
2885     case BTRACE_FORMAT_BTS:
2886       printf_unfiltered (_("Number of packets: %u.\n"),
2887 			 VEC_length (btrace_block_s,
2888 				     btinfo->data.variant.bts.blocks));
2889       break;
2890 
2891 #if defined (HAVE_LIBIPT)
2892     case BTRACE_FORMAT_PT:
2893       {
2894 	struct pt_version version;
2895 
2896 	version = pt_library_version ();
2897 	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2898 			   version.minor, version.build,
2899 			   version.ext != NULL ? version.ext : "");
2900 
2901 	btrace_maint_update_pt_packets (btinfo);
2902 	printf_unfiltered (_("Number of packets: %u.\n"),
2903 			   VEC_length (btrace_pt_packet_s,
2904 				       btinfo->maint.variant.pt.packets));
2905       }
2906       break;
2907 #endif /* defined (HAVE_LIBIPT)  */
2908     }
2909 }
2910 
2911 /* The "maint show btrace pt skip-pad" show value function.  */
2912 
2913 static void
2914 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2915 			       struct cmd_list_element *c,
2916 			       const char *value)
2917 {
2918   fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2919 }
2920 
2921 
2922 /* Initialize btrace maintenance commands.  */
2923 
2924 void _initialize_btrace (void);
2925 void
2926 _initialize_btrace (void)
2927 {
2928   add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2929 	   _("Info about branch tracing data."), &maintenanceinfolist);
2930 
2931   add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2932 		  _("Branch tracing maintenance commands."),
2933 		  &maint_btrace_cmdlist, "maintenance btrace ",
2934 		  0, &maintenancelist);
2935 
2936   add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2937 Set branch tracing specific variables."),
2938                   &maint_btrace_set_cmdlist, "maintenance set btrace ",
2939                   0, &maintenance_set_cmdlist);
2940 
2941   add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2942 Set Intel Processor Trace specific variables."),
2943                   &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2944                   0, &maint_btrace_set_cmdlist);
2945 
2946   add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2947 Show branch tracing specific variables."),
2948                   &maint_btrace_show_cmdlist, "maintenance show btrace ",
2949                   0, &maintenance_show_cmdlist);
2950 
2951   add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2952 Show Intel Processor Trace specific variables."),
2953                   &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2954                   0, &maint_btrace_show_cmdlist);
2955 
2956   add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2957 			   &maint_btrace_pt_skip_pad, _("\
2958 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2959 Show whether PAD packets should be skipped in the btrace packet history."), _("\
2960 When enabled, PAD packets are ignored in the btrace packet history."),
2961 			   NULL, show_maint_btrace_pt_skip_pad,
2962 			   &maint_btrace_pt_set_cmdlist,
2963 			   &maint_btrace_pt_show_cmdlist);
2964 
2965   add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2966 	   _("Print the raw branch tracing data.\n\
2967 With no argument, print ten more packets after the previous ten-line print.\n\
2968 With '-' as argument, print ten packets before a previous ten-line print.\n\
2969 One argument specifies the starting packet of a ten-line print.\n\
2970 Two arguments separated by a comma specify the starting and ending packets \
2971 to print.\n\
2972 When preceded by '+' or '-', the second argument specifies the distance \
2973 from the first.\n"),
2974 	   &maint_btrace_cmdlist);
2975 
2976   add_cmd ("clear-packet-history", class_maintenance,
2977 	   maint_btrace_clear_packet_history_cmd,
2978 	   _("Clears the branch tracing packet history.\n\
2979 Discards the raw branch tracing data but not the execution history data.\n\
2980 "),
2981 	   &maint_btrace_cmdlist);
2982 
2983   add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
2984 	   _("Clears the branch tracing data.\n\
2985 Discards the raw branch tracing data and the execution history data.\n\
2986 The next 'record' command will fetch the branch tracing data anew.\n\
2987 "),
2988 	   &maint_btrace_cmdlist);
2989 
2990 }
2991