xref: /netbsd-src/external/gpl3/gdb.old/dist/gdb/record-btrace.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* Branch trace support for GDB, the GNU debugger.
2 
3    Copyright (C) 2013-2016 Free Software Foundation, Inc.
4 
5    Contributed by Intel Corp. <markus.t.metzger@intel.com>
6 
7    This file is part of GDB.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.

   Note: relies on the GNU named-variadic-macro extension ("args..." and
   "##args") so the macro also works with no arguments after MSG.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
101 
102 /* Update the branch trace for the current thread and return a pointer to its
103    thread_info.
104 
105    Throws an error if there is no thread or no trace.  This function never
106    returns NULL.  */
107 
108 static struct thread_info *
109 require_btrace_thread (void)
110 {
111   struct thread_info *tp;
112 
113   DEBUG ("require");
114 
115   tp = find_thread_ptid (inferior_ptid);
116   if (tp == NULL)
117     error (_("No thread."));
118 
119   btrace_fetch (tp);
120 
121   if (btrace_is_empty (tp))
122     error (_("No trace."));
123 
124   return tp;
125 }
126 
127 /* Update the branch trace for the current thread and return a pointer to its
128    branch trace information struct.
129 
130    Throws an error if there is no thread or no trace.  This function never
131    returns NULL.  */
132 
133 static struct btrace_thread_info *
134 require_btrace (void)
135 {
136   struct thread_info *tp;
137 
138   tp = require_btrace_thread ();
139 
140   return &tp->btrace;
141 }
142 
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback; see record_btrace_auto_enable.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Enabling may legitimately fail for a single thread; report it
	 without aborting whatever triggered the notification.  */
      warning ("%s", error.message);
    }
  END_CATCH
}
158 
/* Callback function to disable branch tracing for one thread.

   ARG is the struct thread_info to stop tracing; used as a cleanup
   callback, hence the void * signature.  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
168 
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Remember the observer so record_btrace_auto_disable can detach it.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
179 
180 /* Disable automatic tracing of new threads.  */
181 
182 static void
183 record_btrace_auto_disable (void)
184 {
185   /* The observer may have been detached, already.  */
186   if (record_btrace_thread_observer == NULL)
187     return;
188 
189   DEBUG ("detach thread observer");
190 
191   observer_detach_new_thread (record_btrace_thread_observer);
192   record_btrace_thread_observer = NULL;
193 }
194 
/* The record-btrace async event handler function.

   DATA is unused; the handler simply kicks the regular inferior event
   processing.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
202 
/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  /* From now on, automatically enable tracing for newly created threads.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Announce the new record target to interested parties (e.g. MI).  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}
222 
/* The to_open method of target record-btrace.

   ARGS is an optional thread-number list restricting which threads to
   trace; an empty or NULL ARGS traces all non-exited threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* If enabling throws part-way through, the cleanup chain disables the
     threads that were already enabled.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
253 
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Only threads that are actually being traced have a btrace target.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
269 
/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  /* Grab the target beneath before unpushing SELF.  */
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}
284 
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Drop the async handler installed by record_btrace_push_target.  */
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
304 
305 /* The to_async method of target record-btrace.  */
306 
307 static void
308 record_btrace_async (struct target_ops *ops, int enable)
309 {
310   if (enable)
311     mark_async_event_handler (record_btrace_async_inferior_event_handler);
312   else
313     clear_async_event_handler (record_btrace_async_inferior_event_handler);
314 
315   ops->beneath->to_async (ops->beneath, enable);
316 }
317 
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit (GiB, MiB, KiB —
   checked largest first), *SIZE is scaled down to that unit and the
   matching suffix is returned; otherwise *SIZE is left alone and the
   empty string is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int value, i;

  value = *size;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    {
      unsigned int mask = (1u << units[i].shift) - 1;

      if ((value & mask) == 0)
	{
	  *size = value >> units[i].shift;
	  return units[i].suffix;
	}
    }

  return "";
}
345 
346 /* Print a BTS configuration.  */
347 
348 static void
349 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
350 {
351   const char *suffix;
352   unsigned int size;
353 
354   size = conf->size;
355   if (size > 0)
356     {
357       suffix = record_btrace_adjust_size (&size);
358       printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
359     }
360 }
361 
362 /* Print an Intel Processor Trace configuration.  */
363 
364 static void
365 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
366 {
367   const char *suffix;
368   unsigned int size;
369 
370   size = conf->size;
371   if (size > 0)
372     {
373       suffix = record_btrace_adjust_size (&size);
374       printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
375     }
376 }
377 
378 /* Print a branch tracing configuration.  */
379 
380 static void
381 record_btrace_print_conf (const struct btrace_config *conf)
382 {
383   printf_unfiltered (_("Recording format: %s.\n"),
384 		     btrace_format_string (conf->format));
385 
386   switch (conf->format)
387     {
388     case BTRACE_FORMAT_NONE:
389       return;
390 
391     case BTRACE_FORMAT_BTS:
392       record_btrace_print_bts_conf (&conf->bts);
393       return;
394 
395     case BTRACE_FORMAT_PT:
396       record_btrace_print_pt_conf (&conf->pt);
397       return;
398     }
399 
400   internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
401 }
402 
/* The to_info_record method of target record-btrace.

   Prints the trace configuration followed by a summary of recorded
   instructions, functions, and gaps for the current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Pull in any new trace before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The highest function number is the total function count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  An instruction number of zero marks a
	     gap (see the loop condition); walk backwards until we find a
	     numbered instruction or reach the start of the trace.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
475 
/* Print a decode error.

   ERRCODE is a FORMAT-specific error code.  IS_ERROR distinguishes real
   decode errors from informational trace events (user quit, tracing
   disabled, overflow) which are printed without the "decode error"
   wording.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come straight from the libipt decoder.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT)  */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
547 
/* Print an unsigned int.

   There is no dedicated ui_out field function for unsigned values, so
   format VAL into field FLD with "%u".  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
555 
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range covering [BEGIN; END) in SYMTAB.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range, growing the range as needed so LINE is
   contained in it.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end <= line)
    /* END is exclusive, so it must end up strictly above LINE.  The
       previous code set END to LINE, which left the largest added line
       outside the range (and ignored LINE == END entirely).  */
    range.end = line + 1;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
621 
622 /* Find the line range associated with PC.  */
623 
624 static struct btrace_line_range
625 btrace_find_line_range (CORE_ADDR pc)
626 {
627   struct btrace_line_range range;
628   struct linetable_entry *lines;
629   struct linetable *ltable;
630   struct symtab *symtab;
631   int nlines, i;
632 
633   symtab = find_pc_line_symtab (pc);
634   if (symtab == NULL)
635     return btrace_mk_line_range (NULL, 0, 0);
636 
637   ltable = SYMTAB_LINETABLE (symtab);
638   if (ltable == NULL)
639     return btrace_mk_line_range (symtab, 0, 0);
640 
641   nlines = ltable->nitems;
642   lines = ltable->item;
643   if (nlines <= 0)
644     return btrace_mk_line_range (symtab, 0, 0);
645 
646   range = btrace_mk_line_range (symtab, 0, 0);
647   for (i = 0; i < nlines - 1; i++)
648     {
649       if ((lines[i].pc == pc) && (lines[i].line != 0))
650 	range = btrace_line_range_add (range, lines[i].line);
651     }
652 
653   return range;
654 }
655 
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  /* LINES.end is exclusive.  */
  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple and list for the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the list the caller will add this line's instructions to.  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
689 
/* Disassemble a section of the recorded instruction trace.

   Prints instructions in [BEGIN; END) to UIOUT, optionally interleaved
   with source lines (DISASSEMBLY_SOURCE in FLAGS).  Gaps in the trace
   are rendered as decode-error annotations.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always mark speculatively executed instructions in the output.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction unless they
		 were already printed for a previous instruction.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
	}
    }

  do_cleanups (cleanups);
}
781 
/* The to_insn_history method of target record-btrace.

   SIZE is the number of instructions to print; its sign selects the
   direction (negative prints backwards).  The window printed is
   remembered so a subsequent call continues from where this one
   stopped.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
867 
/* The to_insn_history_range method of target record-btrace.

   Prints instructions numbered [FROM; TO], both inclusive.  A TO beyond
   the end of the trace is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* Instruction numbers are unsigned int internally.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
920 
/* The to_insn_history_from method of target record-btrace.

   Prints |SIZE| instructions ending at (SIZE < 0) or starting at
   (SIZE > 0) instruction number FROM, both endpoints inclusive.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of wrapping below zero.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
954 
/* Print the instruction number range for a function call history line.

   Prints "BEGIN,END" (both inclusive) where BEGIN is the number of the
   function's first instruction.  Requires BFUN to contain at least one
   instruction.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
973 
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   If no instruction could be mapped, *PEND < *PBEGIN on return (the
   INT_MAX/INT_MIN sentinels), which callers treat as an empty range.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining, macro expansion).  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
1014 
/* Print the source line information for a function call history line.

   Prints "FILE", "FILE:LINE", or "FILE:MIN,MAX" depending on how many
   distinct source lines the function covers.  Prints nothing if BFUN has
   no symbol.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  /* END < BEGIN means no instruction mapped to a source line.  */
  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
1044 
1045 /* Get the name of a branch trace function.  */
1046 
1047 static const char *
1048 btrace_get_bfun_name (const struct btrace_function *bfun)
1049 {
1050   struct minimal_symbol *msym;
1051   struct symbol *sym;
1052 
1053   if (bfun == NULL)
1054     return "??";
1055 
1056   msym = bfun->msym;
1057   sym = bfun->sym;
1058 
1059   if (sym != NULL)
1060     return SYMBOL_PRINT_NAME (sym);
1061   else if (msym != NULL)
1062     return MSYMBOL_PRINT_NAME (msym);
1063   else
1064     return "??";
1065 }
1066 
/* Disassemble a section of the recorded function trace.

   Prints one line per function segment in [BEGIN; END), annotated per
   INT_FLAGS (a record_print_flags value passed as int) with call
   indentation, instruction ranges, and source lines.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth, if requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
1141 
1142 /* The to_call_history method of target record-btrace.  */
1143 
static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history -- looks like a copy/paste from the insn-history code.
     Confirm against MI consumers before changing the string.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* The sign of SIZE selects the browsing direction; its magnitude is the
     number of calls to print.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      /* First request: pick a starting position.  */
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Subsequent request: continue browsing from where the previous
	 request stopped, in the direction given by SIZE's sign.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so the next request continues from here.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1231 
1232 /* The to_call_history_range method of target record-btrace.  */
1233 
1234 static void
1235 record_btrace_call_history_range (struct target_ops *self,
1236 				  ULONGEST from, ULONGEST to,
1237 				  int int_flags)
1238 {
1239   struct btrace_thread_info *btinfo;
1240   struct btrace_call_history *history;
1241   struct btrace_call_iterator begin, end;
1242   struct cleanup *uiout_cleanup;
1243   struct ui_out *uiout;
1244   unsigned int low, high;
1245   int found;
1246   record_print_flags flags = (enum record_print_flag) int_flags;
1247 
1248   uiout = current_uiout;
1249   uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1250 						       "func history");
1251   low = from;
1252   high = to;
1253 
1254   DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1255 
1256   /* Check for wrap-arounds.  */
1257   if (low != from || high != to)
1258     error (_("Bad range."));
1259 
1260   if (high < low)
1261     error (_("Bad range."));
1262 
1263   btinfo = require_btrace ();
1264 
1265   found = btrace_find_call_by_number (&begin, btinfo, low);
1266   if (found == 0)
1267     error (_("Range out of bounds."));
1268 
1269   found = btrace_find_call_by_number (&end, btinfo, high);
1270   if (found == 0)
1271     {
1272       /* Silently truncate the range.  */
1273       btrace_call_end (&end, btinfo);
1274     }
1275   else
1276     {
1277       /* We want both begin and end to be inclusive.  */
1278       btrace_call_next (&end, 1);
1279     }
1280 
1281   btrace_call_history (uiout, btinfo, &begin, &end, flags);
1282   btrace_set_call_history (btinfo, &begin, &end);
1283 
1284   do_cleanups (uiout_cleanup);
1285 }
1286 
1287 /* The to_call_history_from method of target record-btrace.  */
1288 
1289 static void
1290 record_btrace_call_history_from (struct target_ops *self,
1291 				 ULONGEST from, int size,
1292 				 int int_flags)
1293 {
1294   ULONGEST begin, end, context;
1295   record_print_flags flags = (enum record_print_flag) int_flags;
1296 
1297   context = abs (size);
1298   if (context == 0)
1299     error (_("Bad record function-call-history-size."));
1300 
1301   if (size < 0)
1302     {
1303       end = from;
1304 
1305       if (from < context)
1306 	begin = 0;
1307       else
1308 	begin = from - context + 1;
1309     }
1310   else
1311     {
1312       begin = from;
1313       end = from + context - 1;
1314 
1315       /* Check for wrap-around.  */
1316       if (end < begin)
1317 	end = ULONGEST_MAX;
1318     }
1319 
1320   record_btrace_call_history_range (self, begin, end, flags);
1321 }
1322 
1323 /* The to_record_is_replaying method of target record-btrace.  */
1324 
1325 static int
1326 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1327 {
1328   struct thread_info *tp;
1329 
1330   ALL_NON_EXITED_THREADS (tp)
1331     if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1332       return 1;
1333 
1334   return 0;
1335 }
1336 
1337 /* The to_record_will_replay method of target record-btrace.  */
1338 
1339 static int
1340 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1341 {
1342   return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1343 }
1344 
1345 /* The to_xfer_partial method of target record-btrace.  */
1346 
1347 static enum target_xfer_status
1348 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1349 			    const char *annex, gdb_byte *readbuf,
1350 			    const gdb_byte *writebuf, ULONGEST offset,
1351 			    ULONGEST len, ULONGEST *xfered_len)
1352 {
1353   struct target_ops *t;
1354 
1355   /* Filter out requests that don't make sense during replay.  */
1356   if (replay_memory_access == replay_memory_access_read_only
1357       && !record_btrace_generating_corefile
1358       && record_btrace_is_replaying (ops, inferior_ptid))
1359     {
1360       switch (object)
1361 	{
1362 	case TARGET_OBJECT_MEMORY:
1363 	  {
1364 	    struct target_section *section;
1365 
1366 	    /* We do not allow writing memory in general.  */
1367 	    if (writebuf != NULL)
1368 	      {
1369 		*xfered_len = len;
1370 		return TARGET_XFER_UNAVAILABLE;
1371 	      }
1372 
1373 	    /* We allow reading readonly memory.  */
1374 	    section = target_section_by_addr (ops, offset);
1375 	    if (section != NULL)
1376 	      {
1377 		/* Check if the section we found is readonly.  */
1378 		if ((bfd_get_section_flags (section->the_bfd_section->owner,
1379 					    section->the_bfd_section)
1380 		     & SEC_READONLY) != 0)
1381 		  {
1382 		    /* Truncate the request to fit into this section.  */
1383 		    len = min (len, section->endaddr - offset);
1384 		    break;
1385 		  }
1386 	      }
1387 
1388 	    *xfered_len = len;
1389 	    return TARGET_XFER_UNAVAILABLE;
1390 	  }
1391 	}
1392     }
1393 
1394   /* Forward the request.  */
1395   ops = ops->beneath;
1396   return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1397 			       offset, len, xfered_len);
1398 }
1399 
1400 /* The to_insert_breakpoint method of target record-btrace.  */
1401 
1402 static int
1403 record_btrace_insert_breakpoint (struct target_ops *ops,
1404 				 struct gdbarch *gdbarch,
1405 				 struct bp_target_info *bp_tgt)
1406 {
1407   const char *old;
1408   int ret;
1409 
1410   /* Inserting breakpoints requires accessing memory.  Allow it for the
1411      duration of this function.  */
1412   old = replay_memory_access;
1413   replay_memory_access = replay_memory_access_read_write;
1414 
1415   ret = 0;
1416   TRY
1417     {
1418       ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1419     }
1420   CATCH (except, RETURN_MASK_ALL)
1421     {
1422       replay_memory_access = old;
1423       throw_exception (except);
1424     }
1425   END_CATCH
1426   replay_memory_access = old;
1427 
1428   return ret;
1429 }
1430 
1431 /* The to_remove_breakpoint method of target record-btrace.  */
1432 
1433 static int
1434 record_btrace_remove_breakpoint (struct target_ops *ops,
1435 				 struct gdbarch *gdbarch,
1436 				 struct bp_target_info *bp_tgt,
1437 				 enum remove_bp_reason reason)
1438 {
1439   const char *old;
1440   int ret;
1441 
1442   /* Removing breakpoints requires accessing memory.  Allow it for the
1443      duration of this function.  */
1444   old = replay_memory_access;
1445   replay_memory_access = replay_memory_access_read_write;
1446 
1447   ret = 0;
1448   TRY
1449     {
1450       ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1451 						reason);
1452     }
1453   CATCH (except, RETURN_MASK_ALL)
1454     {
1455       replay_memory_access = old;
1456       throw_exception (except);
1457     }
1458   END_CATCH
1459   replay_memory_access = old;
1460 
1461   return ret;
1462 }
1463 
1464 /* The to_fetch_registers method of target record-btrace.  */
1465 
1466 static void
1467 record_btrace_fetch_registers (struct target_ops *ops,
1468 			       struct regcache *regcache, int regno)
1469 {
1470   struct btrace_insn_iterator *replay;
1471   struct thread_info *tp;
1472 
1473   tp = find_thread_ptid (inferior_ptid);
1474   gdb_assert (tp != NULL);
1475 
1476   replay = tp->btrace.replay;
1477   if (replay != NULL && !record_btrace_generating_corefile)
1478     {
1479       const struct btrace_insn *insn;
1480       struct gdbarch *gdbarch;
1481       int pcreg;
1482 
1483       gdbarch = get_regcache_arch (regcache);
1484       pcreg = gdbarch_pc_regnum (gdbarch);
1485       if (pcreg < 0)
1486 	return;
1487 
1488       /* We can only provide the PC register.  */
1489       if (regno >= 0 && regno != pcreg)
1490 	return;
1491 
1492       insn = btrace_insn_get (replay);
1493       gdb_assert (insn != NULL);
1494 
1495       regcache_raw_supply (regcache, regno, &insn->pc);
1496     }
1497   else
1498     {
1499       struct target_ops *t = ops->beneath;
1500 
1501       t->to_fetch_registers (t, regcache, regno);
1502     }
1503 }
1504 
1505 /* The to_store_registers method of target record-btrace.  */
1506 
1507 static void
1508 record_btrace_store_registers (struct target_ops *ops,
1509 			       struct regcache *regcache, int regno)
1510 {
1511   struct target_ops *t;
1512 
1513   if (!record_btrace_generating_corefile
1514       && record_btrace_is_replaying (ops, inferior_ptid))
1515     error (_("Cannot write registers while replaying."));
1516 
1517   gdb_assert (may_write_registers != 0);
1518 
1519   t = ops->beneath;
1520   t->to_store_registers (t, regcache, regno);
1521 }
1522 
1523 /* The to_prepare_to_store method of target record-btrace.  */
1524 
1525 static void
1526 record_btrace_prepare_to_store (struct target_ops *ops,
1527 				struct regcache *regcache)
1528 {
1529   struct target_ops *t;
1530 
1531   if (!record_btrace_generating_corefile
1532       && record_btrace_is_replaying (ops, inferior_ptid))
1533     return;
1534 
1535   t = ops->beneath;
1536   t->to_prepare_to_store (t, regcache);
1537 }
1538 
1539 /* The branch trace frame cache.  */
1540 
struct btrace_frame_cache
{
  /* The thread the frame belongs to.  */
  struct thread_info *tp;

  /* The frame info; this is also the hash key (see bfcache_hash).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table keyed by the frame pointer
   (see bfcache_hash and bfcache_eq).  */

static htab_t bfcache;
1556 
1557 /* hash_f for htab_create_alloc of bfcache.  */
1558 
1559 static hashval_t
1560 bfcache_hash (const void *arg)
1561 {
1562   const struct btrace_frame_cache *cache
1563     = (const struct btrace_frame_cache *) arg;
1564 
1565   return htab_hash_pointer (cache->frame);
1566 }
1567 
1568 /* eq_f for htab_create_alloc of bfcache.  */
1569 
1570 static int
1571 bfcache_eq (const void *arg1, const void *arg2)
1572 {
1573   const struct btrace_frame_cache *cache1
1574     = (const struct btrace_frame_cache *) arg1;
1575   const struct btrace_frame_cache *cache2
1576     = (const struct btrace_frame_cache *) arg2;
1577 
1578   return cache1->frame == cache2->frame;
1579 }
1580 
1581 /* Create a new btrace frame cache.  */
1582 
1583 static struct btrace_frame_cache *
1584 bfcache_new (struct frame_info *frame)
1585 {
1586   struct btrace_frame_cache *cache;
1587   void **slot;
1588 
1589   cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1590   cache->frame = frame;
1591 
1592   slot = htab_find_slot (bfcache, cache, INSERT);
1593   gdb_assert (*slot == NULL);
1594   *slot = cache;
1595 
1596   return cache;
1597 }
1598 
1599 /* Extract the branch trace function from a branch trace frame.  */
1600 
1601 static const struct btrace_function *
1602 btrace_get_frame_function (struct frame_info *frame)
1603 {
1604   const struct btrace_frame_cache *cache;
1605   const struct btrace_function *bfun;
1606   struct btrace_frame_cache pattern;
1607   void **slot;
1608 
1609   pattern.frame = frame;
1610 
1611   slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1612   if (slot == NULL)
1613     return NULL;
1614 
1615   cache = (const struct btrace_frame_cache *) *slot;
1616   return cache->bfun;
1617 }
1618 
1619 /* Implement stop_reason method for record_btrace_frame_unwind.  */
1620 
1621 static enum unwind_stop_reason
1622 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1623 					void **this_cache)
1624 {
1625   const struct btrace_frame_cache *cache;
1626   const struct btrace_function *bfun;
1627 
1628   cache = (const struct btrace_frame_cache *) *this_cache;
1629   bfun = cache->bfun;
1630   gdb_assert (bfun != NULL);
1631 
1632   if (bfun->up == NULL)
1633     return UNWIND_UNAVAILABLE;
1634 
1635   return UNWIND_NO_REASON;
1636 }
1637 
1638 /* Implement this_id method for record_btrace_frame_unwind.  */
1639 
1640 static void
1641 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1642 			     struct frame_id *this_id)
1643 {
1644   const struct btrace_frame_cache *cache;
1645   const struct btrace_function *bfun;
1646   CORE_ADDR code, special;
1647 
1648   cache = (const struct btrace_frame_cache *) *this_cache;
1649 
1650   bfun = cache->bfun;
1651   gdb_assert (bfun != NULL);
1652 
1653   while (bfun->segment.prev != NULL)
1654     bfun = bfun->segment.prev;
1655 
1656   code = get_frame_func (this_frame);
1657   special = bfun->number;
1658 
1659   *this_id = frame_id_build_unavailable_stack_special (code, special);
1660 
1661   DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1662 	 btrace_get_bfun_name (cache->bfun),
1663 	 core_addr_to_string_nz (this_id->code_addr),
1664 	 core_addr_to_string_nz (this_id->special_addr));
1665 }
1666 
1667 /* Implement prev_register method for record_btrace_frame_unwind.  */
1668 
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  /* Branch trace does not record register contents; the PC is the only
     register we can reconstruct from the trace.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* Per the flag name, the up link was created at a return: use the
	 first instruction recorded for the caller segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise the caller segment ends at the call instruction; the
	 unwound PC is the instruction following it.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1715 
1716 /* Implement sniffer method for record_btrace_frame_unwind.  */
1717 
static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: claim it only while replaying; it then shows the
	 function segment at the replay position.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: this is the caller of the btrace frame below,
	 unless the callee was entered via a tail call -- that case is
	 handled by record_btrace_tailcall_frame_sniffer.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1765 
1766 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */
1767 
1768 static int
1769 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1770 				      struct frame_info *this_frame,
1771 				      void **this_cache)
1772 {
1773   const struct btrace_function *bfun, *callee;
1774   struct btrace_frame_cache *cache;
1775   struct frame_info *next;
1776 
1777   next = get_next_frame (this_frame);
1778   if (next == NULL)
1779     return 0;
1780 
1781   callee = btrace_get_frame_function (next);
1782   if (callee == NULL)
1783     return 0;
1784 
1785   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1786     return 0;
1787 
1788   bfun = callee->up;
1789   if (bfun == NULL)
1790     return 0;
1791 
1792   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1793 	 btrace_get_bfun_name (bfun), bfun->level);
1794 
1795   /* This is our frame.  Initialize the frame cache.  */
1796   cache = bfcache_new (this_frame);
1797   cache->tp = find_thread_ptid (inferior_ptid);
1798   cache->bfun = bfun;
1799 
1800   *this_cache = cache;
1801   return 1;
1802 }
1803 
/* Implement the dealloc_cache method for the btrace frame unwinders:
   drop THIS_CACHE's entry from BFCACHE when the frame is destroyed.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  /* The entry must exist; it was added by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1817 
/* btrace recording does not store previous memory contents, nor the
   contents of stack frames.  Any unwinding would return erroneous results
   as the stack contents no longer match the changed PC value restored
   from history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */
1823 
const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,		/* unwind data -- none needed here.  NOTE(review): field
		   position per struct frame_unwind; confirm against
		   frame-unwind.h.  */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1834 
/* Like record_btrace_frame_unwind, but for frames whose callee was
   entered via a tail call (see record_btrace_tailcall_frame_sniffer).  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1845 
1846 /* Implement the to_get_unwinder method.  */
1847 
static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  /* SELF is unused; record-btrace always uses its fixed btrace frame
     unwinder.  */
  return &record_btrace_frame_unwind;
}
1853 
1854 /* Implement the to_get_tailcall_unwinder method.  */
1855 
static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  /* SELF is unused; record-btrace always uses its fixed tailcall
     unwinder.  */
  return &record_btrace_tailcall_frame_unwind;
}
1861 
1862 /* Return a human-readable string for FLAG.  */
1863 
static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  /* NOTE(review): no default case -- presumably deliberate so the
     compiler can warn about unhandled enumerators; confirm before
     adding one.  */
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  /* Combinations of flags or unknown values end up here.  */
  return "<invalid>";
}
1887 
1888 /* Indicate that TP should be resumed according to FLAG.  */
1889 
static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.
     Only the intent is recorded here; the actual move happens in
     record_btrace_wait (see record_btrace_resume).  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1908 
1909 /* Get the current frame for TP.  */
1910 
static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      /* Rethrow after restoring the saved state.  */
      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1957 
1958 /* Start replaying a thread.  */
1959 
static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial replay setup before rethrowing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2039 
2040 /* Stop replaying a thread.  */
2041 
2042 static void
2043 record_btrace_stop_replaying (struct thread_info *tp)
2044 {
2045   struct btrace_thread_info *btinfo;
2046 
2047   btinfo = &tp->btrace;
2048 
2049   xfree (btinfo->replay);
2050   btinfo->replay = NULL;
2051 
2052   /* Make sure we're not leaving any stale registers.  */
2053   registers_changed_ptid (tp->ptid);
2054 }
2055 
2056 /* Stop replaying TP if it is at the end of its execution history.  */
2057 
2058 static void
2059 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2060 {
2061   struct btrace_insn_iterator *replay, end;
2062   struct btrace_thread_info *btinfo;
2063 
2064   btinfo = &tp->btrace;
2065   replay = btinfo->replay;
2066 
2067   if (replay == NULL)
2068     return;
2069 
2070   btrace_insn_end (&end, btinfo);
2071 
2072   if (btrace_insn_cmp (replay, &end) == 0)
2073     record_btrace_stop_replaying (tp);
2074 }
2075 
2076 /* The to_resume method of target record-btrace.  */
2077 
static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      /* Non-stop: apply the requested flag to every matching thread.  */
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2151 
2152 /* Cancel resuming TP.  */
2153 
2154 static void
2155 record_btrace_cancel_resume (struct thread_info *tp)
2156 {
2157   enum btrace_thread_flag flags;
2158 
2159   flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2160   if (flags == 0)
2161     return;
2162 
2163   DEBUG ("cancel resume thread %s (%s): %x (%s)",
2164 	 print_thread_id (tp),
2165 	 target_pid_to_str (tp->ptid), flags,
2166 	 btrace_thread_flag_to_str (flags));
2167 
2168   tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2169   record_btrace_stop_replaying_at_end (tp);
2170 }
2171 
2172 /* Return a target_waitstatus indicating that we ran out of history.  */
2173 
2174 static struct target_waitstatus
2175 btrace_step_no_history (void)
2176 {
2177   struct target_waitstatus status;
2178 
2179   status.kind = TARGET_WAITKIND_NO_HISTORY;
2180 
2181   return status;
2182 }
2183 
2184 /* Return a target_waitstatus indicating that a step finished.  */
2185 
2186 static struct target_waitstatus
2187 btrace_step_stopped (void)
2188 {
2189   struct target_waitstatus status;
2190 
2191   status.kind = TARGET_WAITKIND_STOPPED;
2192   status.value.sig = GDB_SIGNAL_TRAP;
2193 
2194   return status;
2195 }
2196 
2197 /* Return a target_waitstatus indicating that a thread was stopped as
2198    requested.  */
2199 
2200 static struct target_waitstatus
2201 btrace_step_stopped_on_request (void)
2202 {
2203   struct target_waitstatus status;
2204 
2205   status.kind = TARGET_WAITKIND_STOPPED;
2206   status.value.sig = GDB_SIGNAL_0;
2207 
2208   return status;
2209 }
2210 
2211 /* Return a target_waitstatus indicating a spurious stop.  */
2212 
2213 static struct target_waitstatus
2214 btrace_step_spurious (void)
2215 {
2216   struct target_waitstatus status;
2217 
2218   status.kind = TARGET_WAITKIND_SPURIOUS;
2219 
2220   return status;
2221 }
2222 
2223 /* Return a target_waitstatus indicating that the thread was not resumed.  */
2224 
2225 static struct target_waitstatus
2226 btrace_step_no_resumed (void)
2227 {
2228   struct target_waitstatus status;
2229 
2230   status.kind = TARGET_WAITKIND_NO_RESUMED;
2231 
2232   return status;
2233 }
2234 
2235 /* Return a target_waitstatus indicating that we should wait again.  */
2236 
2237 static struct target_waitstatus
2238 btrace_step_again (void)
2239 {
2240   struct target_waitstatus status;
2241 
2242   status.kind = TARGET_WAITKIND_IGNORE;
2243 
2244   return status;
2245 }
2246 
2247 /* Clear the record histories.  */
2248 
2249 static void
2250 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2251 {
2252   xfree (btinfo->insn_history);
2253   xfree (btinfo->call_history);
2254 
2255   btinfo->insn_history = NULL;
2256   btinfo->call_history = NULL;
2257 }
2258 
2259 /* Check whether TP's current replay position is at a breakpoint.  */
2260 
2261 static int
2262 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2263 {
2264   struct btrace_insn_iterator *replay;
2265   struct btrace_thread_info *btinfo;
2266   const struct btrace_insn *insn;
2267   struct inferior *inf;
2268 
2269   btinfo = &tp->btrace;
2270   replay = btinfo->replay;
2271 
2272   if (replay == NULL)
2273     return 0;
2274 
2275   insn = btrace_insn_get (replay);
2276   if (insn == NULL)
2277     return 0;
2278 
2279   inf = find_inferior_ptid (tp->ptid);
2280   if (inf == NULL)
2281     return 0;
2282 
2283   return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2284 					     &btinfo->stop_reason);
2285 }
2286 
/* Step one instruction in forward direction.

   Returns SPURIOUS if the step succeeded, STOPPED if we hit a breakpoint,
   and NO_HISTORY if we stepped past the end of the execution history.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  A gap is a trace section without
     instructions; btrace_insn_get returns NULL there.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2330 
/* Step one instruction in backward direction.

   Returns SPURIOUS if the step succeeded, STOPPED if we hit a breakpoint,
   and NO_HISTORY if we stepped past the beginning of the execution
   history.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2372 
/* Step a single thread.

   Consumes TP's pending move/stop request and performs at most one
   instruction step.  Returns IGNORE if stepping should continue (the
   request is re-armed in that case), otherwise the stop to report.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the request; continuation cases below re-set it.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      /* The thread was asked to stop; report it without moving.  */
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* Single step forward; a successful (SPURIOUS) step becomes a
	 SIGTRAP stop.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Single step backward; same reporting as BTHR_STEP.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continue forward: keep the request armed and ask the wait loop
	 to step us again.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      /* Continue backward, analogous to BTHR_CONT.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2441 
/* A vector of thread pointers, used by record_btrace_wait below to keep
   work lists of moving and out-of-history threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2446 
2447 /* Announce further events if necessary.  */
2448 
2449 static void
2450 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2451 				      const VEC (tp_t) *no_history)
2452 {
2453   int more_moving, more_no_history;
2454 
2455   more_moving = !VEC_empty (tp_t, moving);
2456   more_no_history = !VEC_empty (tp_t, no_history);
2457 
2458   if (!more_moving && !more_no_history)
2459     return;
2460 
2461   if (more_moving)
2462     DEBUG ("movers pending");
2463 
2464   if (more_no_history)
2465     DEBUG ("no-history pending");
2466 
2467   mark_async_event_handler (record_btrace_async_inferior_event_handler);
2468 }
2469 
/* The to_wait method of target record-btrace.

   Steps all resumed threads matching PTID until one of them reports an
   event, stores that event in *STATUS and returns the eventing thread's
   ptid.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  /* Both vectors are freed on every exit path below.  */
  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending move or stop request.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread keeps moving; revisit it in the next pass.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Move the thread to the no-history list; see above.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* A reportable event; this ends both loops.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2604 
2605 /* The to_stop method of target record-btrace.  */
2606 
2607 static void
2608 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2609 {
2610   DEBUG ("stop %s", target_pid_to_str (ptid));
2611 
2612   /* As long as we're not replaying, just forward the request.  */
2613   if ((execution_direction != EXEC_REVERSE)
2614       && !record_btrace_is_replaying (ops, minus_one_ptid))
2615     {
2616       ops = ops->beneath;
2617       ops->to_stop (ops, ptid);
2618     }
2619   else
2620     {
2621       struct thread_info *tp;
2622 
2623       ALL_NON_EXITED_THREADS (tp)
2624        if (ptid_match (tp->ptid, ptid))
2625          {
2626            tp->btrace.flags &= ~BTHR_MOVE;
2627            tp->btrace.flags |= BTHR_STOP;
2628          }
2629     }
2630  }
2631 
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The recorded execution history allows reverse execution.  */
  return 1;
}
2639 
2640 /* The to_stopped_by_sw_breakpoint method of target record-btrace.  */
2641 
2642 static int
2643 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2644 {
2645   if (record_btrace_is_replaying (ops, minus_one_ptid))
2646     {
2647       struct thread_info *tp = inferior_thread ();
2648 
2649       return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2650     }
2651 
2652   return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2653 }
2654 
2655 /* The to_supports_stopped_by_sw_breakpoint method of target
2656    record-btrace.  */
2657 
2658 static int
2659 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2660 {
2661   if (record_btrace_is_replaying (ops, minus_one_ptid))
2662     return 1;
2663 
2664   return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2665 }
2666 
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  /* During replay, report the stop reason recorded for the current thread;
     otherwise defer to the target beneath.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2681 
2682 /* The to_supports_stopped_by_hw_breakpoint method of target
2683    record-btrace.  */
2684 
2685 static int
2686 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2687 {
2688   if (record_btrace_is_replaying (ops, minus_one_ptid))
2689     return 1;
2690 
2691   return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2692 }
2693 
2694 /* The to_update_thread_list method of target record-btrace.  */
2695 
2696 static void
2697 record_btrace_update_thread_list (struct target_ops *ops)
2698 {
2699   /* We don't add or remove threads during replay.  */
2700   if (record_btrace_is_replaying (ops, minus_one_ptid))
2701     return;
2702 
2703   /* Forward the request.  */
2704   ops = ops->beneath;
2705   ops->to_update_thread_list (ops);
2706 }
2707 
2708 /* The to_thread_alive method of target record-btrace.  */
2709 
2710 static int
2711 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2712 {
2713   /* We don't add or remove threads during replay.  */
2714   if (record_btrace_is_replaying (ops, minus_one_ptid))
2715     return find_thread_ptid (ptid) != NULL;
2716 
2717   /* Forward the request.  */
2718   ops = ops->beneath;
2719   return ops->to_thread_alive (ops, ptid);
2720 }
2721 
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears TP's record histories and reprints the current frame so the user
   sees the new replay position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing changes, so skip the
	   history reset and frame output below.  */
	return;

      /* Moving the replay position invalidates cached register values.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2752 
2753 /* The to_goto_record_begin method of target record-btrace.  */
2754 
2755 static void
2756 record_btrace_goto_begin (struct target_ops *self)
2757 {
2758   struct thread_info *tp;
2759   struct btrace_insn_iterator begin;
2760 
2761   tp = require_btrace_thread ();
2762 
2763   btrace_insn_begin (&begin, &tp->btrace);
2764   record_btrace_set_replay (tp, &begin);
2765 }
2766 
2767 /* The to_goto_record_end method of target record-btrace.  */
2768 
2769 static void
2770 record_btrace_goto_end (struct target_ops *ops)
2771 {
2772   struct thread_info *tp;
2773 
2774   tp = require_btrace_thread ();
2775 
2776   record_btrace_set_replay (tp, NULL);
2777 }
2778 
2779 /* The to_goto_record method of target record-btrace.  */
2780 
2781 static void
2782 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2783 {
2784   struct thread_info *tp;
2785   struct btrace_insn_iterator it;
2786   unsigned int number;
2787   int found;
2788 
2789   number = insn;
2790 
2791   /* Check for wrap-arounds.  */
2792   if (number != insn)
2793     error (_("Instruction number out of range."));
2794 
2795   tp = require_btrace_thread ();
2796 
2797   found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2798   if (found == 0)
2799     error (_("No such instruction."));
2800 
2801   record_btrace_set_replay (tp, &it);
2802 }
2803 
2804 /* The to_record_stop_replaying method of target record-btrace.  */
2805 
2806 static void
2807 record_btrace_stop_replaying_all (struct target_ops *self)
2808 {
2809   struct thread_info *tp;
2810 
2811   ALL_NON_EXITED_THREADS (tp)
2812     record_btrace_stop_replaying (tp);
2813 }
2814 
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction recorded by the last resume request.  */
  return record_btrace_resume_exec_dir;
}
2822 
/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* Flag core generation; cleared again in
     record_btrace_done_generating_core.  */
  record_btrace_generating_corefile = 1;
}
2830 
/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Core generation finished; see
     record_btrace_prepare_to_generate_core.  */
  record_btrace_generating_corefile = 0;
}
2838 
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Target life cycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;

  /* Replay state.  */
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;

  /* Memory, breakpoints, and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2897 
2898 /* Start recording in BTS format.  */
2899 
2900 static void
2901 cmd_record_btrace_bts_start (char *args, int from_tty)
2902 {
2903   if (args != NULL && *args != 0)
2904     error (_("Invalid argument."));
2905 
2906   record_btrace_conf.format = BTRACE_FORMAT_BTS;
2907 
2908   TRY
2909     {
2910       execute_command ("target record-btrace", from_tty);
2911     }
2912   CATCH (exception, RETURN_MASK_ALL)
2913     {
2914       record_btrace_conf.format = BTRACE_FORMAT_NONE;
2915       throw_exception (exception);
2916     }
2917   END_CATCH
2918 }
2919 
2920 /* Start recording in Intel Processor Trace format.  */
2921 
2922 static void
2923 cmd_record_btrace_pt_start (char *args, int from_tty)
2924 {
2925   if (args != NULL && *args != 0)
2926     error (_("Invalid argument."));
2927 
2928   record_btrace_conf.format = BTRACE_FORMAT_PT;
2929 
2930   TRY
2931     {
2932       execute_command ("target record-btrace", from_tty);
2933     }
2934   CATCH (exception, RETURN_MASK_ALL)
2935     {
2936       record_btrace_conf.format = BTRACE_FORMAT_NONE;
2937       throw_exception (exception);
2938     }
2939   END_CATCH
2940 }
2941 
/* Alias for "target record".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Prefer the Intel Processor Trace format; if opening the target with
     it fails, fall back to BTS.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Neither format worked; reset the format and report the BTS
	     failure.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2973 
/* The "set record btrace" command.  With no subcommand, list the current
   settings.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2981 
/* The "show record btrace" command.  With no subcommand, show all
   settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2989 
2990 /* The "show record btrace replay-memory-access" command.  */
2991 
2992 static void
2993 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2994 			       struct cmd_list_element *c, const char *value)
2995 {
2996   fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2997 		    replay_memory_access);
2998 }
2999 
/* The "set record btrace bts" command.  Requires a subcommand; print the
   available ones otherwise.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
3010 
/* The "show record btrace bts" command.  With no subcommand, show all bts
   settings.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
3018 
/* The "set record btrace pt" command.  Requires a subcommand; print the
   available ones otherwise.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3029 
/* The "show record btrace pt" command.  With no subcommand, show all pt
   settings.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3037 
/* The "record bts buffer-size" show value function.  Prints VALUE, the
   stringified setting, to FILE.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3048 
/* The "record pt buffer-size" show value function.  Prints VALUE, the
   stringified setting, to FILE.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3059 
/* Forward declaration so the definition below has a visible prototype.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and aliases.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefixes and options.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes requested from the target.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
3164