xref: /netbsd-src/external/gpl3/gdb.old/dist/gdb/record-btrace.c (revision fdd524d4ccd2bb0c6f67401e938dabf773eb0372)
1 /* Branch trace support for GDB, the GNU debugger.
2 
3    Copyright (C) 2013-2015 Free Software Foundation, Inc.
4 
5    Contributed by Intel Corp. <markus.t.metzger@intel.com>
6 
7    This file is part of GDB.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  This always points at
   one of the two constants above and is compared by pointer identity,
   not with strcmp.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message when record_debug is non-zero.
   Use do ... while (0) to avoid ambiguities when used in if statements.
   Uses GCC's named variadic macro arguments; ##args drops the trailing
   comma when no arguments are given.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
84 
85 
86 /* Update the branch trace for the current thread and return a pointer to its
87    thread_info.
88 
89    Throws an error if there is no thread or no trace.  This function never
90    returns NULL.  */
91 
92 static struct thread_info *
93 require_btrace_thread (void)
94 {
95   struct thread_info *tp;
96 
97   DEBUG ("require");
98 
99   tp = find_thread_ptid (inferior_ptid);
100   if (tp == NULL)
101     error (_("No thread."));
102 
103   btrace_fetch (tp);
104 
105   if (btrace_is_empty (tp))
106     error (_("No trace."));
107 
108   return tp;
109 }
110 
111 /* Update the branch trace for the current thread and return a pointer to its
112    branch trace information struct.
113 
114    Throws an error if there is no thread or no trace.  This function never
115    returns NULL.  */
116 
117 static struct btrace_thread_info *
118 require_btrace (void)
119 {
120   struct thread_info *tp;
121 
122   tp = require_btrace_thread ();
123 
124   return &tp->btrace;
125 }
126 
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback, so errors thrown by
   btrace_enable are demoted to warnings instead of propagating.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
140 
/* Callback function to disable branch tracing for one thread.  ARG is
   the thread_info pointer, passed as void * for use with cleanups.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
152 
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Every newly-added thread gets tracing enabled (warn-on-error) via
     the observer.  The handle is kept for later detaching.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
163 
164 /* Disable automatic tracing of new threads.  */
165 
166 static void
167 record_btrace_auto_disable (void)
168 {
169   /* The observer may have been detached, already.  */
170   if (record_btrace_thread_observer == NULL)
171     return;
172 
173   DEBUG ("detach thread observer");
174 
175   observer_detach_new_thread (record_btrace_thread_observer);
176   record_btrace_thread_observer = NULL;
177 }
178 
/* The record-btrace async event handler function.  Forwards to the
   common inferior event handler; DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
186 
/* The to_open method of target record-btrace.

   ARGS is an optional thread number list restricting which threads get
   traced; NULL or empty means all non-exited threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Each successfully enabled thread gets a cleanup that disables it
     again, so an error part-way through leaves no thread traced.  The
     chain is discarded (not run) on success.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  discard_cleanups (disable_chain);
}
232 
233 /* The to_stop_recording method of target record-btrace.  */
234 
235 static void
236 record_btrace_stop_recording (struct target_ops *self)
237 {
238   struct thread_info *tp;
239 
240   DEBUG ("stop recording");
241 
242   record_btrace_auto_disable ();
243 
244   ALL_NON_EXITED_THREADS (tp)
245     if (tp->btrace.target != NULL)
246       btrace_disable (tp);
247 }
248 
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Delete the async handler installed by record_btrace_open, if any.  */
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
268 
269 /* The to_async method of target record-btrace.  */
270 
271 static void
272 record_btrace_async (struct target_ops *ops,
273 		     void (*callback) (enum inferior_event_type event_type,
274 				       void *context),
275 		     void *context)
276 {
277   if (callback != NULL)
278     mark_async_event_handler (record_btrace_async_inferior_event_handler);
279   else
280     clear_async_event_handler (record_btrace_async_inferior_event_handler);
281 
282   ops->beneath->to_async (ops->beneath, callback, context);
283 }
284 
/* The to_info_record method of target record-btrace.  Prints the number
   of recorded instructions and function segments for the current thread,
   and the replay position if replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The highest numbers sit one step back from the end iterator.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
329 
/* Print unsigned int VAL into field FLD of UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
337 
/* Disassemble a section of the recorded instruction trace.  The range
   [BEGIN; END) is half-open; END is not printed.  Each line shows the
   instruction index followed by the disassembly of that instruction.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
368 
/* The to_insn_history method of target record-btrace.

   SIZE's sign selects the direction (negative: backward); its magnitude
   is the number of instructions to print.  A repeated request continues
   from where the previous one stopped.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed range, in the direction
	 given by SIZE's sign.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so a subsequent request can continue.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
454 
455 /* The to_insn_history_range method of target record-btrace.  */
456 
457 static void
458 record_btrace_insn_history_range (struct target_ops *self,
459 				  ULONGEST from, ULONGEST to, int flags)
460 {
461   struct btrace_thread_info *btinfo;
462   struct btrace_insn_history *history;
463   struct btrace_insn_iterator begin, end;
464   struct cleanup *uiout_cleanup;
465   struct ui_out *uiout;
466   unsigned int low, high;
467   int found;
468 
469   uiout = current_uiout;
470   uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
471 						       "insn history");
472   low = from;
473   high = to;
474 
475   DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
476 
477   /* Check for wrap-arounds.  */
478   if (low != from || high != to)
479     error (_("Bad range."));
480 
481   if (high < low)
482     error (_("Bad range."));
483 
484   btinfo = require_btrace ();
485 
486   found = btrace_find_insn_by_number (&begin, btinfo, low);
487   if (found == 0)
488     error (_("Range out of bounds."));
489 
490   found = btrace_find_insn_by_number (&end, btinfo, high);
491   if (found == 0)
492     {
493       /* Silently truncate the range.  */
494       btrace_insn_end (&end, btinfo);
495     }
496   else
497     {
498       /* We want both begin and end to be inclusive.  */
499       btrace_insn_next (&end, 1);
500     }
501 
502   btrace_insn_history (uiout, &begin, &end, flags);
503   btrace_set_insn_history (btinfo, &begin, &end);
504 
505   do_cleanups (uiout_cleanup);
506 }
507 
508 /* The to_insn_history_from method of target record-btrace.  */
509 
510 static void
511 record_btrace_insn_history_from (struct target_ops *self,
512 				 ULONGEST from, int size, int flags)
513 {
514   ULONGEST begin, end, context;
515 
516   context = abs (size);
517   if (context == 0)
518     error (_("Bad record instruction-history-size."));
519 
520   if (size < 0)
521     {
522       end = from;
523 
524       if (from < context)
525 	begin = 0;
526       else
527 	begin = from - context + 1;
528     }
529   else
530     {
531       begin = from;
532       end = from + context - 1;
533 
534       /* Check for wrap-around.  */
535       if (end < begin)
536 	end = ULONGEST_MAX;
537     }
538 
539   record_btrace_insn_history_range (self, begin, end, flags);
540 }
541 
/* Print the instruction number range for a function call history line.
   Both ends of the printed range are inclusive.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  /* A function segment always contains at least one instruction.  */
  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
560 
/* Print the source line information for a function call history line.
   Prints nothing if BFUN has no debug symbol.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  /* Print the line range as ":min" or ":min,max"; skip it entirely if
     the range is empty.  */
  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
592 
593 /* Get the name of a branch trace function.  */
594 
595 static const char *
596 btrace_get_bfun_name (const struct btrace_function *bfun)
597 {
598   struct minimal_symbol *msym;
599   struct symbol *sym;
600 
601   if (bfun == NULL)
602     return "??";
603 
604   msym = bfun->msym;
605   sym = bfun->sym;
606 
607   if (sym != NULL)
608     return SYMBOL_PRINT_NAME (sym);
609   else if (msym != NULL)
610     return MSYMBOL_PRINT_NAME (msym);
611   else
612     return "??";
613 }
614 
/* Disassemble a section of the recorded function trace.  The range
   [BEGIN; END) is half-open; END is not printed.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indent the function according to its call depth.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Prefer the debug symbol name over the minimal symbol name.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
673 
/* The to_call_history method of target record-btrace.

   SIZE's sign selects the direction (negative: backward); its magnitude
   is the number of calls to print.  A repeated request continues from
   where the previous one stopped.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history; looks like a copy-paste from the insn variant (the
     range variant uses "func history").  Verify against MI consumers
     before changing the string.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Continue from the previously printed range, in the direction
	 given by SIZE's sign.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so a subsequent request can continue.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
762 
763 /* The to_call_history_range method of target record-btrace.  */
764 
765 static void
766 record_btrace_call_history_range (struct target_ops *self,
767 				  ULONGEST from, ULONGEST to, int flags)
768 {
769   struct btrace_thread_info *btinfo;
770   struct btrace_call_history *history;
771   struct btrace_call_iterator begin, end;
772   struct cleanup *uiout_cleanup;
773   struct ui_out *uiout;
774   unsigned int low, high;
775   int found;
776 
777   uiout = current_uiout;
778   uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
779 						       "func history");
780   low = from;
781   high = to;
782 
783   DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
784 
785   /* Check for wrap-arounds.  */
786   if (low != from || high != to)
787     error (_("Bad range."));
788 
789   if (high < low)
790     error (_("Bad range."));
791 
792   btinfo = require_btrace ();
793 
794   found = btrace_find_call_by_number (&begin, btinfo, low);
795   if (found == 0)
796     error (_("Range out of bounds."));
797 
798   found = btrace_find_call_by_number (&end, btinfo, high);
799   if (found == 0)
800     {
801       /* Silently truncate the range.  */
802       btrace_call_end (&end, btinfo);
803     }
804   else
805     {
806       /* We want both begin and end to be inclusive.  */
807       btrace_call_next (&end, 1);
808     }
809 
810   btrace_call_history (uiout, btinfo, &begin, &end, flags);
811   btrace_set_call_history (btinfo, &begin, &end);
812 
813   do_cleanups (uiout_cleanup);
814 }
815 
816 /* The to_call_history_from method of target record-btrace.  */
817 
818 static void
819 record_btrace_call_history_from (struct target_ops *self,
820 				 ULONGEST from, int size, int flags)
821 {
822   ULONGEST begin, end, context;
823 
824   context = abs (size);
825   if (context == 0)
826     error (_("Bad record function-call-history-size."));
827 
828   if (size < 0)
829     {
830       end = from;
831 
832       if (from < context)
833 	begin = 0;
834       else
835 	begin = from - context + 1;
836     }
837   else
838     {
839       begin = from;
840       end = from + context - 1;
841 
842       /* Check for wrap-around.  */
843       if (end < begin)
844 	end = ULONGEST_MAX;
845     }
846 
847   record_btrace_call_history_range (self, begin, end, flags);
848 }
849 
850 /* The to_record_is_replaying method of target record-btrace.  */
851 
852 static int
853 record_btrace_is_replaying (struct target_ops *self)
854 {
855   struct thread_info *tp;
856 
857   ALL_NON_EXITED_THREADS (tp)
858     if (btrace_is_replaying (tp))
859       return 1;
860 
861   return 0;
862 }
863 
864 /* The to_xfer_partial method of target record-btrace.  */
865 
866 static enum target_xfer_status
867 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
868 			    const char *annex, gdb_byte *readbuf,
869 			    const gdb_byte *writebuf, ULONGEST offset,
870 			    ULONGEST len, ULONGEST *xfered_len)
871 {
872   struct target_ops *t;
873 
874   /* Filter out requests that don't make sense during replay.  */
875   if (replay_memory_access == replay_memory_access_read_only
876       && !record_btrace_generating_corefile
877       && record_btrace_is_replaying (ops))
878     {
879       switch (object)
880 	{
881 	case TARGET_OBJECT_MEMORY:
882 	  {
883 	    struct target_section *section;
884 
885 	    /* We do not allow writing memory in general.  */
886 	    if (writebuf != NULL)
887 	      {
888 		*xfered_len = len;
889 		return TARGET_XFER_UNAVAILABLE;
890 	      }
891 
892 	    /* We allow reading readonly memory.  */
893 	    section = target_section_by_addr (ops, offset);
894 	    if (section != NULL)
895 	      {
896 		/* Check if the section we found is readonly.  */
897 		if ((bfd_get_section_flags (section->the_bfd_section->owner,
898 					    section->the_bfd_section)
899 		     & SEC_READONLY) != 0)
900 		  {
901 		    /* Truncate the request to fit into this section.  */
902 		    len = min (len, section->endaddr - offset);
903 		    break;
904 		  }
905 	      }
906 
907 	    *xfered_len = len;
908 	    return TARGET_XFER_UNAVAILABLE;
909 	  }
910 	}
911     }
912 
913   /* Forward the request.  */
914   ops = ops->beneath;
915   return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
916 			       offset, len, xfered_len);
917 }
918 
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the previous access mode before re-throwing any error.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
946 
/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the previous access mode before re-throwing any error.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
974 
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC register is available; it is taken from
   the recorded instruction at the replay position.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      /* Not replaying; read registers from the target beneath.  */
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1015 
1016 /* The to_store_registers method of target record-btrace.  */
1017 
1018 static void
1019 record_btrace_store_registers (struct target_ops *ops,
1020 			       struct regcache *regcache, int regno)
1021 {
1022   struct target_ops *t;
1023 
1024   if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1025     error (_("This record target does not allow writing registers."));
1026 
1027   gdb_assert (may_write_registers != 0);
1028 
1029   t = ops->beneath;
1030   t->to_store_registers (t, regcache, regno);
1031 }
1032 
1033 /* The to_prepare_to_store method of target record-btrace.  */
1034 
1035 static void
1036 record_btrace_prepare_to_store (struct target_ops *ops,
1037 				struct regcache *regcache)
1038 {
1039   struct target_ops *t;
1040 
1041   if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1042     return;
1043 
1044   t = ops->beneath;
1045   t->to_prepare_to_store (t, regcache);
1046 }
1047 
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Entries hash and compare on this pointer; see
     bfcache_hash and bfcache_eq.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1065 
1066 /* hash_f for htab_create_alloc of bfcache.  */
1067 
1068 static hashval_t
1069 bfcache_hash (const void *arg)
1070 {
1071   const struct btrace_frame_cache *cache = arg;
1072 
1073   return htab_hash_pointer (cache->frame);
1074 }
1075 
1076 /* eq_f for htab_create_alloc of bfcache.  */
1077 
1078 static int
1079 bfcache_eq (const void *arg1, const void *arg2)
1080 {
1081   const struct btrace_frame_cache *cache1 = arg1;
1082   const struct btrace_frame_cache *cache2 = arg2;
1083 
1084   return cache1->frame == cache2->frame;
1085 }
1086 
/* Create a new btrace frame cache for FRAME and register it in the
   bfcache hash table.  The entry is allocated on the frame obstack.
   Asserts that no entry for FRAME exists yet.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1104 
1105 /* Extract the branch trace function from a branch trace frame.  */
1106 
1107 static const struct btrace_function *
1108 btrace_get_frame_function (struct frame_info *frame)
1109 {
1110   const struct btrace_frame_cache *cache;
1111   const struct btrace_function *bfun;
1112   struct btrace_frame_cache pattern;
1113   void **slot;
1114 
1115   pattern.frame = frame;
1116 
1117   slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1118   if (slot == NULL)
1119     return NULL;
1120 
1121   cache = *slot;
1122   return cache->bfun;
1123 }
1124 
1125 /* Implement stop_reason method for record_btrace_frame_unwind.  */
1126 
1127 static enum unwind_stop_reason
1128 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1129 					void **this_cache)
1130 {
1131   const struct btrace_frame_cache *cache;
1132   const struct btrace_function *bfun;
1133 
1134   cache = *this_cache;
1135   bfun = cache->bfun;
1136   gdb_assert (bfun != NULL);
1137 
1138   if (bfun->up == NULL)
1139     return UNWIND_UNAVAILABLE;
1140 
1141   return UNWIND_NO_REASON;
1142 }
1143 
1144 /* Implement this_id method for record_btrace_frame_unwind.  */
1145 
1146 static void
1147 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1148 			     struct frame_id *this_id)
1149 {
1150   const struct btrace_frame_cache *cache;
1151   const struct btrace_function *bfun;
1152   CORE_ADDR code, special;
1153 
1154   cache = *this_cache;
1155 
1156   bfun = cache->bfun;
1157   gdb_assert (bfun != NULL);
1158 
1159   while (bfun->segment.prev != NULL)
1160     bfun = bfun->segment.prev;
1161 
1162   code = get_frame_func (this_frame);
1163   special = bfun->number;
1164 
1165   *this_id = frame_id_build_unavailable_stack_special (code, special);
1166 
1167   DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1168 	 btrace_get_bfun_name (cache->bfun),
1169 	 core_addr_to_string_nz (this_id->code_addr),
1170 	 core_addr_to_string_nz (this_id->special_addr));
1171 }
1172 
/* Implement prev_register method for record_btrace_frame_unwind.

   Branch trace does not record register or memory contents, so the only
   register that can be reconstructed is the caller's PC; any other
   register request raises NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  /* Anything but the PC is unavailable in recorded history.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link was created by a return: the unwound PC is the first
	 instruction of the caller segment we returned to.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link was created by a call: the unwound PC is the
	 instruction following the call in the caller segment.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1221 
/* Implement sniffer method for record_btrace_frame_unwind.

   Claim THIS_FRAME while replaying: either the innermost frame, identified
   via the thread's replay iterator, or the caller of a btrace frame we
   already claimed (unless that link is a tail call, which the tailcall
   unwinder handles).  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: unwind through the callee's up link, unless the link
	 was created by a tail call.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  /* Decline the frame if we found no function segment for it.  */
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1271 
1272 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */
1273 
1274 static int
1275 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1276 				      struct frame_info *this_frame,
1277 				      void **this_cache)
1278 {
1279   const struct btrace_function *bfun, *callee;
1280   struct btrace_frame_cache *cache;
1281   struct frame_info *next;
1282 
1283   next = get_next_frame (this_frame);
1284   if (next == NULL)
1285     return 0;
1286 
1287   callee = btrace_get_frame_function (next);
1288   if (callee == NULL)
1289     return 0;
1290 
1291   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1292     return 0;
1293 
1294   bfun = callee->up;
1295   if (bfun == NULL)
1296     return 0;
1297 
1298   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1299 	 btrace_get_bfun_name (bfun), bfun->level);
1300 
1301   /* This is our frame.  Initialize the frame cache.  */
1302   cache = bfcache_new (this_frame);
1303   cache->tp = find_thread_ptid (inferior_ptid);
1304   cache->bfun = bfun;
1305 
1306   *this_cache = cache;
1307   return 1;
1308 }
1309 
/* Implement dealloc_cache method for record_btrace_frame_unwind.
   Remove THIS_CACHE's entry from the global BFCACHE hash table; the entry
   itself was allocated on the frame obstack (see bfcache_new).  */
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must have been registered by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1323 
/* btrace recording does not store previous memory contents, nor previous
   stack frame contents.  Any unwinding would return erroneous results as
   the stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */
1329 
/* The btrace unwinder for normal frames.  */
const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1340 
/* The btrace unwinder for tail-call frames, i.e. frames whose callee's up
   link carries BFUN_UP_LINKS_TO_TAILCALL (see the tailcall sniffer).  */
const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1351 
/* Implement the to_get_unwinder method.  Always return the btrace
   unwinder; its sniffer declines frames we are not replaying.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1359 
/* Implement the to_get_tailcall_unwinder method.  Always return the btrace
   tailcall unwinder; its sniffer declines non-tailcall frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1367 
1368 /* Indicate that TP should be resumed according to FLAG.  */
1369 
1370 static void
1371 record_btrace_resume_thread (struct thread_info *tp,
1372 			     enum btrace_thread_flag flag)
1373 {
1374   struct btrace_thread_info *btinfo;
1375 
1376   DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1377 
1378   btinfo = &tp->btrace;
1379 
1380   if ((btinfo->flags & BTHR_MOVE) != 0)
1381     error (_("Thread already moving."));
1382 
1383   /* Fetch the latest branch trace.  */
1384   btrace_fetch (tp);
1385 
1386   btinfo->flags |= flag;
1387 }
1388 
1389 /* Find the thread to resume given a PTID.  */
1390 
1391 static struct thread_info *
1392 record_btrace_find_resume_thread (ptid_t ptid)
1393 {
1394   struct thread_info *tp;
1395 
1396   /* When asked to resume everything, we pick the current thread.  */
1397   if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1398     ptid = inferior_ptid;
1399 
1400   return find_thread_ptid (ptid);
1401 }
1402 
/* Start replaying a thread.  Return the new replay iterator (also stored
   in TP's btrace thread info), or NULL if there is no trace to replay.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the partial replay setup before re-throwing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1489 
1490 /* Stop replaying a thread.  */
1491 
1492 static void
1493 record_btrace_stop_replaying (struct thread_info *tp)
1494 {
1495   struct btrace_thread_info *btinfo;
1496 
1497   btinfo = &tp->btrace;
1498 
1499   xfree (btinfo->replay);
1500   btinfo->replay = NULL;
1501 
1502   /* Make sure we're not leaving any stale registers.  */
1503   registers_changed_ptid (tp->ptid);
1504 }
1505 
1506 /* The to_resume method of target record-btrace.  */
1507 
1508 static void
1509 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1510 		      enum gdb_signal signal)
1511 {
1512   struct thread_info *tp, *other;
1513   enum btrace_thread_flag flag;
1514 
1515   DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1516 
1517   /* Store the execution direction of the last resume.  */
1518   record_btrace_resume_exec_dir = execution_direction;
1519 
1520   tp = record_btrace_find_resume_thread (ptid);
1521   if (tp == NULL)
1522     error (_("Cannot find thread to resume."));
1523 
1524   /* Stop replaying other threads if the thread to resume is not replaying.  */
1525   if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1526     ALL_NON_EXITED_THREADS (other)
1527       record_btrace_stop_replaying (other);
1528 
1529   /* As long as we're not replaying, just forward the request.  */
1530   if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1531     {
1532       ops = ops->beneath;
1533       return ops->to_resume (ops, ptid, step, signal);
1534     }
1535 
1536   /* Compute the btrace thread flag for the requested move.  */
1537   if (step == 0)
1538     flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1539   else
1540     flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1541 
1542   /* At the moment, we only move a single thread.  We could also move
1543      all threads in parallel by single-stepping each resumed thread
1544      until the first runs into an event.
1545      When we do that, we would want to continue all other threads.
1546      For now, just resume one thread to not confuse to_wait.  */
1547   record_btrace_resume_thread (tp, flag);
1548 
1549   /* We just indicate the resume intent here.  The actual stepping happens in
1550      record_btrace_wait below.  */
1551 
1552   /* Async support.  */
1553   if (target_can_async_p ())
1554     {
1555       target_async (inferior_event_handler, 0);
1556       mark_async_event_handler (record_btrace_async_inferior_event_handler);
1557     }
1558 }
1559 
/* Find a thread to move: prefer PTID itself if it has a pending move
   request (BTHR_MOVE), otherwise any non-exited thread that does.
   Return NULL if no thread has been resumed.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1579 
/* Return a target_waitstatus indicating that we ran out of history.
   Only the KIND member is set; this status carries no value.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}
1591 
/* Return a target_waitstatus indicating that a step finished.
   Stopping with SIGTRAP mirrors how a live target reports a completed
   single-step.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}
1604 
1605 /* Clear the record histories.  */
1606 
1607 static void
1608 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1609 {
1610   xfree (btinfo->insn_history);
1611   xfree (btinfo->call_history);
1612 
1613   btinfo->insn_history = NULL;
1614   btinfo->call_history = NULL;
1615 }
1616 
/* Step a single thread according to its pending btrace move request
   (BTHR_* flags).  Return the wait status describing how the move
   ended.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request; it is handled right here.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* Forward single-step.  We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Reverse single-step.  Start replaying if we're not already
	 doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Forward continue.  We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we run out of history or hit a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Reverse continue.  Start replaying if we're not already
	 doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward until we run out of history or hit a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1742 
/* The to_wait method of target record-btrace.

   Performs the move requested by record_btrace_resume for a single
   thread, or forwards the request to the target beneath when not
   replaying.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      /* No thread has a pending move request; report nothing.  */
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads. */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1786 
/* The to_can_execute_reverse method of target record-btrace.
   Reverse execution via replay is always supported.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1794 
1795 /* The to_decr_pc_after_break method of target record-btrace.  */
1796 
1797 static CORE_ADDR
1798 record_btrace_decr_pc_after_break (struct target_ops *ops,
1799 				   struct gdbarch *gdbarch)
1800 {
1801   /* When replaying, we do not actually execute the breakpoint instruction
1802      so there is no need to adjust the PC after hitting a breakpoint.  */
1803   if (record_btrace_is_replaying (ops))
1804     return 0;
1805 
1806   return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1807 }
1808 
1809 /* The to_update_thread_list method of target record-btrace.  */
1810 
1811 static void
1812 record_btrace_update_thread_list (struct target_ops *ops)
1813 {
1814   /* We don't add or remove threads during replay.  */
1815   if (record_btrace_is_replaying (ops))
1816     return;
1817 
1818   /* Forward the request.  */
1819   ops = ops->beneath;
1820   ops->to_update_thread_list (ops);
1821 }
1822 
1823 /* The to_thread_alive method of target record-btrace.  */
1824 
1825 static int
1826 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1827 {
1828   /* We don't add or remove threads during replay.  */
1829   if (record_btrace_is_replaying (ops))
1830     return find_thread_ptid (ptid) != NULL;
1831 
1832   /* Forward the request.  */
1833   ops = ops->beneath;
1834   return ops->to_thread_alive (ops, ptid);
1835 }
1836 
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  /* A NULL iterator (or one without a function segment) stops replay.  */
  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      /* NOTE(review): if record_btrace_start_replaying found no trace it
	 leaves BTINFO->REPLAY NULL and the assignment below would
	 dereference NULL.  Presumably IT->function != NULL implies trace
	 exists - confirm against btrace.c.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1864 
1865 /* The to_goto_record_begin method of target record-btrace.  */
1866 
1867 static void
1868 record_btrace_goto_begin (struct target_ops *self)
1869 {
1870   struct thread_info *tp;
1871   struct btrace_insn_iterator begin;
1872 
1873   tp = require_btrace_thread ();
1874 
1875   btrace_insn_begin (&begin, &tp->btrace);
1876   record_btrace_set_replay (tp, &begin);
1877 
1878   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1879 }
1880 
1881 /* The to_goto_record_end method of target record-btrace.  */
1882 
1883 static void
1884 record_btrace_goto_end (struct target_ops *ops)
1885 {
1886   struct thread_info *tp;
1887 
1888   tp = require_btrace_thread ();
1889 
1890   record_btrace_set_replay (tp, NULL);
1891 
1892   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1893 }
1894 
1895 /* The to_goto_record method of target record-btrace.  */
1896 
1897 static void
1898 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1899 {
1900   struct thread_info *tp;
1901   struct btrace_insn_iterator it;
1902   unsigned int number;
1903   int found;
1904 
1905   number = insn;
1906 
1907   /* Check for wrap-arounds.  */
1908   if (number != insn)
1909     error (_("Instruction number out of range."));
1910 
1911   tp = require_btrace_thread ();
1912 
1913   found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1914   if (found == 0)
1915     error (_("No such instruction."));
1916 
1917   record_btrace_set_replay (tp, &it);
1918 
1919   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1920 }
1921 
/* The to_execution_direction target method.
   Report the direction stored by the last record_btrace_resume call.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
1929 
/* The to_prepare_to_generate_core target method.
   While the flag is set, replay-specific register/memory interception is
   bypassed (see e.g. record_btrace_prepare_to_store).  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
1937 
/* The to_done_generating_core target method.
   Clear the flag set by record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
1945 
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  /* Generic record methods shared with other record targets.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* History browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  /* Replay support: memory/register access and unwinding.  */
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1996 
/* Alias for "target record": start btrace recording via the equivalent
   "target record-btrace" command.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  /* This command accepts no arguments.  */
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  execute_command ("target record-btrace", from_tty);
}
2007 
/* The "set record btrace" command.
   Invoked when no sub-command follows; display the available settings
   (hence cmd_show_list on the "set" sub-command list).  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2015 
/* The "show record btrace" command.
   Display all settings in the "show" sub-command list.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2023 
2024 /* The "show record btrace replay-memory-access" command.  */
2025 
2026 static void
2027 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2028 			       struct cmd_list_element *c, const char *value)
2029 {
2030   fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2031 		    replay_memory_access);
2032 }
2033 
2034 void _initialize_record_btrace (void);
2035 
2036 /* Initialize btrace commands.  */
2037 
/* Initialize btrace commands: "record btrace" (alias "record b"), the
   "set/show record btrace" prefixes and their settings, the target
   itself, and the frame cache hash table.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame cache entries themselves live on frame obstacks; the table
     only needs to allocate its internal structures.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
2075