/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "common/vec.h"
#include "inferior.h"
#include <algorithm>

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)

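/* Editorial example (not part of the original source): with debugging
   enabled via "set debug record 1", a call such as

     DEBUG ("resuming thread %s", print_thread_id (tp));

   would print "[record-btrace] resuming thread 1.1" to gdb_stdlog.  */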

/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}
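
/* Editorial note: the state above follows the "set record btrace cpu"
   commands; assuming the documented syntax, e.g.

     (gdb) set record btrace cpu auto          <- CS_AUTO, returns nullptr
     (gdb) set record btrace cpu none          <- CS_NONE, vendor CV_UNKNOWN
     (gdb) set record btrace cpu intel:6/158   <- CS_CPU, that vendor/model

   trace decode either auto-detects the cpu, ignores cpu errata
   workarounds, or applies those of the given cpu.  */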

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjust *SIZE to a human-readable value and return the corresponding
   unit suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
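
/* Editorial example: for a 4 MiB buffer, *SIZE == 4u << 20, which the
   function rewrites to 4 with suffix "MB", so callers print
   "Buffer size: 4MB.".  A value that is not a multiple of 1024, e.g.
   1500, is returned unchanged with an empty suffix.  */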

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
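
/* Editorial example: starting from the empty range [0; 0), adding line 42
   yields [42; 43); adding line 40 to that widens it to [40; 43).  */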

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST are optional emitters for the last printed
   source line and the instructions corresponding to that source line.  When
   printing a new source line, the open emitters are reset and new ones are
   opened for the new source line.  If the source line range in LINES is not
   empty, this function leaves the emitters for the last printed source line
   open so instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
         btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
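
/* Editorial example: FROM == 100 with SIZE == 10 selects the inclusive
   range [100; 109]; SIZE == -10 instead selects [91; 100], ending at
   FROM.  Both cases are handled by insn_history_range above.  */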

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}
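
/* Editorial example: a function segment with insn_offset == 10 that
   contains three instructions prints as "10,12"; both ends are
   inclusive.  */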

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)),
		       ui_out_style_kind::FILE);

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}
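
/* Editorial example: a segment whose instructions map to lines 10-20 of
   demo.c prints as "demo.c:10,20"; a single-line segment prints just
   "demo.c:10".  */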

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
			     ui_out_style_kind::FUNCTION);
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
			     ui_out_style_kind::FUNCTION);
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     ui_out_style_kind::FUNCTION);

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  for (thread_info *tp : all_non_exited_threads (ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}
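
/* Editorial note: a reverse resume (DIR == EXEC_REVERSE) always implies
   replaying, even before the first reverse step was taken, whereas a
   forward resume replays only if a thread matching PTID is already
   replaying.  */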

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
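
/* Editorial example: while replaying with the default "set record btrace
   replay-memory-access read-only", a write such as "print var = 1" is
   rejected with the memory reported as unavailable, and reads succeed
   only for read-only sections such as code; "set record btrace
   replay-memory-access read-write" forwards all requests to the target
   beneath.  */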

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache->ptid ());
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by FRAME.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
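
/* Editorial sketch: BFCACHE is presumably created elsewhere with the two
   callbacks above, along the lines of

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
				  NULL, xcalloc, xfree);

   so that entries hash and compare purely on their FRAME pointer.  */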

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any attempt to unwind would return erroneous results
   as the stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */
1878 
1879 const struct frame_unwind record_btrace_frame_unwind =
1880 {
1881   NORMAL_FRAME,
1882   record_btrace_frame_unwind_stop_reason,
1883   record_btrace_frame_this_id,
1884   record_btrace_frame_prev_register,
1885   NULL,
1886   record_btrace_frame_sniffer,
1887   record_btrace_frame_dealloc_cache
1888 };
1889 
1890 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1891 {
1892   TAILCALL_FRAME,
1893   record_btrace_frame_unwind_stop_reason,
1894   record_btrace_frame_this_id,
1895   record_btrace_frame_prev_register,
1896   NULL,
1897   record_btrace_tailcall_frame_sniffer,
1898   record_btrace_frame_dealloc_cache
1899 };
1900 
1901 /* Implement the get_unwinder method.  */
1902 
1903 const struct frame_unwind *
1904 record_btrace_target::get_unwinder ()
1905 {
1906   return &record_btrace_frame_unwind;
1907 }
1908 
1909 /* Implement the get_tailcall_unwinder method.  */
1910 
1911 const struct frame_unwind *
1912 record_btrace_target::get_tailcall_unwinder ()
1913 {
1914   return &record_btrace_tailcall_frame_unwind;
1915 }
1916 
1917 /* Return a human-readable string for FLAG.  */
1918 
1919 static const char *
1920 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1921 {
1922   switch (flag)
1923     {
1924     case BTHR_STEP:
1925       return "step";
1926 
1927     case BTHR_RSTEP:
1928       return "reverse-step";
1929 
1930     case BTHR_CONT:
1931       return "cont";
1932 
1933     case BTHR_RCONT:
1934       return "reverse-cont";
1935 
1936     case BTHR_STOP:
1937       return "stop";
1938     }
1939 
1940   return "<invalid>";
1941 }
1942 
1943 /* Indicate that TP should be resumed according to FLAG.  */
1944 
1945 static void
1946 record_btrace_resume_thread (struct thread_info *tp,
1947 			     enum btrace_thread_flag flag)
1948 {
1949   struct btrace_thread_info *btinfo;
1950 
1951   DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1952 	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1953 
1954   btinfo = &tp->btrace;
1955 
1956   /* Fetch the latest branch trace.  */
1957   btrace_fetch (tp, record_btrace_get_cpu ());
1958 
1959   /* A resume request overwrites a preceding resume or stop request.  */
1960   btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1961   btinfo->flags |= flag;
1962 }
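
/* Note: this assumes BTHR_MOVE is the union of the four movement flags
   (BTHR_STEP | BTHR_RSTEP | BTHR_CONT | BTHR_RCONT), so the mask above
   clears any pending movement or stop request before recording the new
   one.  */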
1963 
1964 /* Get the current frame for TP.  */
1965 
1966 static struct frame_id
1967 get_thread_current_frame_id (struct thread_info *tp)
1968 {
1969   struct frame_id id;
1970   int executing;
1971 
1972   /* Set current thread, which is implicitly used by
1973      get_current_frame.  */
1974   scoped_restore_current_thread restore_thread;
1975 
1976   switch_to_thread (tp);
1977 
1978   /* Clear the executing flag to allow changes to the current frame.
1979      We are not actually running, yet.  We just started a reverse execution
1980      command or a record goto command.
1981      For the latter, EXECUTING is false and this has no effect.
1982      For the former, EXECUTING is true and we're in wait, about to
1983      move the thread.  Since we need to recompute the stack, we temporarily
1984      set EXECUTING to false.  */
1985   executing = tp->executing;
1986   set_executing (inferior_ptid, false);
1987 
1988   id = null_frame_id;
1989   TRY
1990     {
1991       id = get_frame_id (get_current_frame ());
1992     }
1993   CATCH (except, RETURN_MASK_ALL)
1994     {
1995       /* Restore the previous execution state.  */
1996       set_executing (inferior_ptid, executing);
1997 
1998       throw_exception (except);
1999     }
2000   END_CATCH
2001 
2002   /* Restore the previous execution state.  */
2003   set_executing (inferior_ptid, executing);
2004 
2005   return id;
2006 }
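
/* The TRY/CATCH in get_thread_current_frame_id exists only to restore
   the executing flag on both the normal and the exceptional path.  An
   illustrative standalone sketch of the same invariant expressed as an
   RAII guard; the guard type and function are hypothetical.  */
#if 0
#include <functional>
#include <utility>

class scoped_flag_restore
{
public:
  explicit scoped_flag_restore (std::function<void ()> restore)
    : m_restore (std::move (restore))
  {}

  ~scoped_flag_restore () { m_restore (); }

private:
  std::function<void ()> m_restore;
};

static void
example (bool &executing)
{
  bool saved = executing;
  scoped_flag_restore restore ([&executing, saved] ()
			       { executing = saved; });

  executing = false;
  /* ... code that may throw; EXECUTING is restored either way ...  */
}
#endif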
2007 
2008 /* Start replaying a thread.  */
2009 
2010 static struct btrace_insn_iterator *
2011 record_btrace_start_replaying (struct thread_info *tp)
2012 {
2013   struct btrace_insn_iterator *replay;
2014   struct btrace_thread_info *btinfo;
2015 
2016   btinfo = &tp->btrace;
2017   replay = NULL;
2018 
2019   /* We can't start replaying without trace.  */
2020   if (btinfo->functions.empty ())
2021     return NULL;
2022 
2023   /* GDB stores the current frame_id when stepping in order to detect steps
2024      into subroutines.
2025      Since frames are computed differently when we're replaying, we need to
2026      recompute those stored frames and fix them up so we can still detect
2027      subroutines after we started replaying.  */
2028   TRY
2029     {
2030       struct frame_id frame_id;
2031       int upd_step_frame_id, upd_step_stack_frame_id;
2032 
2033       /* The current frame without replaying - computed via normal unwind.  */
2034       frame_id = get_thread_current_frame_id (tp);
2035 
2036       /* Check if we need to update any stepping-related frame id's.  */
2037       upd_step_frame_id = frame_id_eq (frame_id,
2038 				       tp->control.step_frame_id);
2039       upd_step_stack_frame_id = frame_id_eq (frame_id,
2040 					     tp->control.step_stack_frame_id);
2041 
2042       /* We start replaying at the end of the branch trace.  This corresponds
2043 	 to the current instruction.  */
2044       replay = XNEW (struct btrace_insn_iterator);
2045       btrace_insn_end (replay, btinfo);
2046 
2047       /* Skip gaps at the end of the trace.  */
2048       while (btrace_insn_get (replay) == NULL)
2049 	{
2050 	  unsigned int steps;
2051 
2052 	  steps = btrace_insn_prev (replay, 1);
2053 	  if (steps == 0)
2054 	    error (_("No trace."));
2055 	}
2056 
2057       /* We're not replaying, yet.  */
2058       gdb_assert (btinfo->replay == NULL);
2059       btinfo->replay = replay;
2060 
2061       /* Make sure we're not using any stale registers.  */
2062       registers_changed_thread (tp);
2063 
2064       /* The current frame with replaying - computed via btrace unwind.  */
2065       frame_id = get_thread_current_frame_id (tp);
2066 
2067       /* Replace stepping related frames where necessary.  */
2068       if (upd_step_frame_id)
2069 	tp->control.step_frame_id = frame_id;
2070       if (upd_step_stack_frame_id)
2071 	tp->control.step_stack_frame_id = frame_id;
2072     }
2073   CATCH (except, RETURN_MASK_ALL)
2074     {
2075       xfree (btinfo->replay);
2076       btinfo->replay = NULL;
2077 
2078       registers_changed_thread (tp);
2079 
2080       throw_exception (except);
2081     }
2082   END_CATCH
2083 
2084   return replay;
2085 }
2086 
2087 /* Stop replaying a thread.  */
2088 
2089 static void
2090 record_btrace_stop_replaying (struct thread_info *tp)
2091 {
2092   struct btrace_thread_info *btinfo;
2093 
2094   btinfo = &tp->btrace;
2095 
2096   xfree (btinfo->replay);
2097   btinfo->replay = NULL;
2098 
2099   /* Make sure we're not leaving any stale registers.  */
2100   registers_changed_thread (tp);
2101 }
2102 
2103 /* Stop replaying TP if it is at the end of its execution history.  */
2104 
2105 static void
2106 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2107 {
2108   struct btrace_insn_iterator *replay, end;
2109   struct btrace_thread_info *btinfo;
2110 
2111   btinfo = &tp->btrace;
2112   replay = btinfo->replay;
2113 
2114   if (replay == NULL)
2115     return;
2116 
2117   btrace_insn_end (&end, btinfo);
2118 
2119   if (btrace_insn_cmp (replay, &end) == 0)
2120     record_btrace_stop_replaying (tp);
2121 }
2122 
2123 /* The resume method of target record-btrace.  */
2124 
2125 void
2126 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2127 {
2128   enum btrace_thread_flag flag, cflag;
2129 
2130   DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2131 	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2132 	 step ? "step" : "cont");
2133 
2134   /* Store the execution direction of the last resume.
2135 
2136      If there is more than one resume call, we have to rely on infrun
2137      to not change the execution direction in-between.  */
2138   record_btrace_resume_exec_dir = ::execution_direction;
2139 
2140   /* As long as we're not replaying, just forward the request.
2141 
2142      For non-stop targets this means that no thread is replaying.  In order to
2143      make progress, we may need to explicitly move replaying threads to the end
2144      of their execution history.  */
2145   if ((::execution_direction != EXEC_REVERSE)
2146       && !record_is_replaying (minus_one_ptid))
2147     {
2148       this->beneath ()->resume (ptid, step, signal);
2149       return;
2150     }
2151 
2152   /* Compute the btrace thread flag for the requested move.  */
2153   if (::execution_direction == EXEC_REVERSE)
2154     {
2155       flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2156       cflag = BTHR_RCONT;
2157     }
2158   else
2159     {
2160       flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2161       cflag = BTHR_CONT;
2162     }
2163 
2164   /* We just indicate the resume intent here.  The actual stepping happens in
2165      record_btrace_wait below.
2166 
2167      For all-stop targets, we only step INFERIOR_PTID and continue others.  */
2168   if (!target_is_non_stop_p ())
2169     {
2170       gdb_assert (inferior_ptid.matches (ptid));
2171 
2172       for (thread_info *tp : all_non_exited_threads (ptid))
2173 	{
2174 	  if (tp->ptid.matches (inferior_ptid))
2175 	    record_btrace_resume_thread (tp, flag);
2176 	  else
2177 	    record_btrace_resume_thread (tp, cflag);
2178 	}
2179     }
2180   else
2181     {
2182       for (thread_info *tp : all_non_exited_threads (ptid))
2183 	record_btrace_resume_thread (tp, flag);
2184     }
2185 
2186   /* Async support.  */
2187   if (target_can_async_p ())
2188     {
2189       target_async (1);
2190       mark_async_event_handler (record_btrace_async_inferior_event_handler);
2191     }
2192 }
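
/* Summary of the flag computation in resume above:

     direction  step  resumed thread  other threads (all-stop)
     forward    no    BTHR_CONT       BTHR_CONT
     forward    yes   BTHR_STEP       BTHR_CONT
     reverse    no    BTHR_RCONT      BTHR_RCONT
     reverse    yes   BTHR_RSTEP      BTHR_RCONT  */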
2193 
2194 /* The commit_resume method of target record-btrace.  */
2195 
2196 void
2197 record_btrace_target::commit_resume ()
2198 {
2199   if ((::execution_direction != EXEC_REVERSE)
2200       && !record_is_replaying (minus_one_ptid))
2201     beneath ()->commit_resume ();
2202 }
2203 
2204 /* Cancel resuming TP.  */
2205 
2206 static void
2207 record_btrace_cancel_resume (struct thread_info *tp)
2208 {
2209   enum btrace_thread_flag flags;
2210 
2211   flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2212   if (flags == 0)
2213     return;
2214 
2215   DEBUG ("cancel resume thread %s (%s): %x (%s)",
2216 	 print_thread_id (tp),
2217 	 target_pid_to_str (tp->ptid), flags,
2218 	 btrace_thread_flag_to_str (flags));
2219 
2220   tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2221   record_btrace_stop_replaying_at_end (tp);
2222 }
2223 
2224 /* Return a target_waitstatus indicating that we ran out of history.  */
2225 
2226 static struct target_waitstatus
2227 btrace_step_no_history (void)
2228 {
2229   struct target_waitstatus status;
2230 
2231   status.kind = TARGET_WAITKIND_NO_HISTORY;
2232 
2233   return status;
2234 }
2235 
2236 /* Return a target_waitstatus indicating that a step finished.  */
2237 
2238 static struct target_waitstatus
2239 btrace_step_stopped (void)
2240 {
2241   struct target_waitstatus status;
2242 
2243   status.kind = TARGET_WAITKIND_STOPPED;
2244   status.value.sig = GDB_SIGNAL_TRAP;
2245 
2246   return status;
2247 }
2248 
2249 /* Return a target_waitstatus indicating that a thread was stopped as
2250    requested.  */
2251 
2252 static struct target_waitstatus
2253 btrace_step_stopped_on_request (void)
2254 {
2255   struct target_waitstatus status;
2256 
2257   status.kind = TARGET_WAITKIND_STOPPED;
2258   status.value.sig = GDB_SIGNAL_0;
2259 
2260   return status;
2261 }
2262 
2263 /* Return a target_waitstatus indicating a spurious stop.  */
2264 
2265 static struct target_waitstatus
2266 btrace_step_spurious (void)
2267 {
2268   struct target_waitstatus status;
2269 
2270   status.kind = TARGET_WAITKIND_SPURIOUS;
2271 
2272   return status;
2273 }
2274 
2275 /* Return a target_waitstatus indicating that the thread was not resumed.  */
2276 
2277 static struct target_waitstatus
2278 btrace_step_no_resumed (void)
2279 {
2280   struct target_waitstatus status;
2281 
2282   status.kind = TARGET_WAITKIND_NO_RESUMED;
2283 
2284   return status;
2285 }
2286 
2287 /* Return a target_waitstatus indicating that we should wait again.  */
2288 
2289 static struct target_waitstatus
2290 btrace_step_again (void)
2291 {
2292   struct target_waitstatus status;
2293 
2294   status.kind = TARGET_WAITKIND_IGNORE;
2295 
2296   return status;
2297 }
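
/* The helpers above construct the target_waitstatus values consumed by
   record_btrace_step_thread and the wait method below.  An illustrative
   sketch of the typical dispatch shape (the function name is
   hypothetical; the called names are from this file):  */
#if 0
static struct target_waitstatus
example_step_until_event (struct thread_info *tp)
{
  for (;;)
    {
      struct target_waitstatus status = record_btrace_step_thread (tp);

      /* TARGET_WAITKIND_IGNORE (btrace_step_again) means: step again.  */
      if (status.kind != TARGET_WAITKIND_IGNORE)
	return status;
    }
}
#endif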
2298 
2299 /* Clear the record histories.  */
2300 
2301 static void
2302 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2303 {
2304   xfree (btinfo->insn_history);
2305   xfree (btinfo->call_history);
2306 
2307   btinfo->insn_history = NULL;
2308   btinfo->call_history = NULL;
2309 }
2310 
2311 /* Check whether TP's current replay position is at a breakpoint.  */
2312 
2313 static int
2314 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2315 {
2316   struct btrace_insn_iterator *replay;
2317   struct btrace_thread_info *btinfo;
2318   const struct btrace_insn *insn;
2319 
2320   btinfo = &tp->btrace;
2321   replay = btinfo->replay;
2322 
2323   if (replay == NULL)
2324     return 0;
2325 
2326   insn = btrace_insn_get (replay);
2327   if (insn == NULL)
2328     return 0;
2329 
2330   return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2331 					     &btinfo->stop_reason);
2332 }
2333 
2334 /* Step one instruction in forward direction.  */
2335 
2336 static struct target_waitstatus
2337 record_btrace_single_step_forward (struct thread_info *tp)
2338 {
2339   struct btrace_insn_iterator *replay, end, start;
2340   struct btrace_thread_info *btinfo;
2341 
2342   btinfo = &tp->btrace;
2343   replay = btinfo->replay;
2344 
2345   /* We're done if we're not replaying.  */
2346   if (replay == NULL)
2347     return btrace_step_no_history ();
2348 
2349   /* Check if we're stepping a breakpoint.  */
2350   if (record_btrace_replay_at_breakpoint (tp))
2351     return btrace_step_stopped ();
2352 
2353   /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
2354      jump back to the instruction at which we started.  */
2355   start = *replay;
2356   do
2357     {
2358       unsigned int steps;
2359 
2360       /* We will bail out here if we continue stepping after reaching the end
2361 	 of the execution history.  */
2362       steps = btrace_insn_next (replay, 1);
2363       if (steps == 0)
2364 	{
2365 	  *replay = start;
2366 	  return btrace_step_no_history ();
2367 	}
2368     }
2369   while (btrace_insn_get (replay) == NULL);
2370 
2371   /* Determine the end of the instruction trace.  */
2372   btrace_insn_end (&end, btinfo);
2373 
2374   /* The execution trace contains (and ends with) the current instruction.
2375      This instruction has not been executed, yet, so the trace really ends
2376      one instruction earlier.  */
2377   if (btrace_insn_cmp (replay, &end) == 0)
2378     return btrace_step_no_history ();
2379 
2380   return btrace_step_spurious ();
2381 }
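
/* The gap-skipping do/while above also appears in the backward variant
   and in record_btrace_start_replaying.  An illustrative standalone
   sketch of the idiom: advance until a real instruction, restoring the
   start position if the history is exhausted.  It would be called with
   btrace_insn_next or btrace_insn_prev as MOVE; the function name is
   hypothetical.  */
#if 0
static bool
example_skip_gaps (struct btrace_insn_iterator *it,
		   unsigned int (*move) (struct btrace_insn_iterator *,
					 unsigned int))
{
  struct btrace_insn_iterator start = *it;

  do
    {
      if (move (it, 1) == 0)
	{
	  /* Out of history; undo the move.  */
	  *it = start;
	  return false;
	}
    }
  while (btrace_insn_get (it) == NULL);

  return true;
}
#endif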
2382 
2383 /* Step one instruction in backward direction.  */
2384 
2385 static struct target_waitstatus
2386 record_btrace_single_step_backward (struct thread_info *tp)
2387 {
2388   struct btrace_insn_iterator *replay, start;
2389   struct btrace_thread_info *btinfo;
2390 
2391   btinfo = &tp->btrace;
2392   replay = btinfo->replay;
2393 
2394   /* Start replaying if we're not already doing so.  */
2395   if (replay == NULL)
2396     replay = record_btrace_start_replaying (tp);
2397 
2398   /* If we can't step any further, we reached the end of the history.
2399      Skip gaps during replay.  If we end up at a gap (at the beginning of
2400      the trace), jump back to the instruction at which we started.  */
2401   start = *replay;
2402   do
2403     {
2404       unsigned int steps;
2405 
2406       steps = btrace_insn_prev (replay, 1);
2407       if (steps == 0)
2408 	{
2409 	  *replay = start;
2410 	  return btrace_step_no_history ();
2411 	}
2412     }
2413   while (btrace_insn_get (replay) == NULL);
2414 
2415   /* Check if we're stepping a breakpoint.
2416 
2417      For reverse-stepping, this check is after the step.  There is logic in
2418      infrun.c that handles reverse-stepping separately.  See, for example,
2419      proceed and adjust_pc_after_break.
2420 
2421      This code assumes that for reverse-stepping, PC points to the last
2422      de-executed instruction, whereas for forward-stepping PC points to the
2423      next to-be-executed instruction.  */
2424   if (record_btrace_replay_at_breakpoint (tp))
2425     return btrace_step_stopped ();
2426 
2427   return btrace_step_spurious ();
2428 }
2429 
2430 /* Step a single thread.  */
2431 
2432 static struct target_waitstatus
2433 record_btrace_step_thread (struct thread_info *tp)
2434 {
2435   struct btrace_thread_info *btinfo;
2436   struct target_waitstatus status;
2437   enum btrace_thread_flag flags;
2438 
2439   btinfo = &tp->btrace;
2440 
2441   flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2442   btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2443 
2444   DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2445 	 target_pid_to_str (tp->ptid), flags,
2446 	 btrace_thread_flag_to_str (flags));
2447 
2448   /* We can't step without an execution history.  */
2449   if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2450     return btrace_step_no_history ();
2451 
2452   switch (flags)
2453     {
2454     default:
2455       internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2456 
2457     case BTHR_STOP:
2458       return btrace_step_stopped_on_request ();
2459 
2460     case BTHR_STEP:
2461       status = record_btrace_single_step_forward (tp);
2462       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2463 	break;
2464 
2465       return btrace_step_stopped ();
2466 
2467     case BTHR_RSTEP:
2468       status = record_btrace_single_step_backward (tp);
2469       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2470 	break;
2471 
2472       return btrace_step_stopped ();
2473 
2474     case BTHR_CONT:
2475       status = record_btrace_single_step_forward (tp);
2476       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2477 	break;
2478 
2479       btinfo->flags |= flags;
2480       return btrace_step_again ();
2481 
2482     case BTHR_RCONT:
2483       status = record_btrace_single_step_backward (tp);
2484       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2485 	break;
2486 
2487       btinfo->flags |= flags;
2488       return btrace_step_again ();
2489     }
2490 
2491   /* We keep threads moving at the end of their execution history.  The wait
2492      method will stop the thread for which the event is reported.  */
2493   if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2494     btinfo->flags |= flags;
2495 
2496   return status;
2497 }
2498 
2499 /* Announce further events if necessary.  */
2500 
2501 static void
2502 record_btrace_maybe_mark_async_event
2503   (const std::vector<thread_info *> &moving,
2504    const std::vector<thread_info *> &no_history)
2505 {
2506   bool more_moving = !moving.empty ();
2507   bool more_no_history = !no_history.empty ();
2508 
2509   if (!more_moving && !more_no_history)
2510     return;
2511 
2512   if (more_moving)
2513     DEBUG ("movers pending");
2514 
2515   if (more_no_history)
2516     DEBUG ("no-history pending");
2517 
2518   mark_async_event_handler (record_btrace_async_inferior_event_handler);
2519 }
2520 
2521 /* The wait method of target record-btrace.  */
2522 
2523 ptid_t
2524 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2525 			    int options)
2526 {
2527   std::vector<thread_info *> moving;
2528   std::vector<thread_info *> no_history;
2529 
2530   DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2531 
2532   /* As long as we're not replaying, just forward the request.  */
2533   if ((::execution_direction != EXEC_REVERSE)
2534       && !record_is_replaying (minus_one_ptid))
2535     {
2536       return this->beneath ()->wait (ptid, status, options);
2537     }
2538 
2539   /* Keep a work list of moving threads.  */
2540   for (thread_info *tp : all_non_exited_threads (ptid))
2541     if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2542       moving.push_back (tp);
2543 
2544   if (moving.empty ())
2545     {
2546       *status = btrace_step_no_resumed ();
2547 
2548       DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2549 	     target_waitstatus_to_string (status).c_str ());
2550 
2551       return null_ptid;
2552     }
2553 
2554   /* Step moving threads one by one, one step each, until either one thread
2555      reports an event or we run out of threads to step.
2556 
2557      When stepping more than one thread, chances are that some threads reach
2558      the end of their execution history earlier than others.  If we reported
2559      this immediately, all-stop on top of non-stop would stop all threads and
2560      resume the same threads next time.  And we would report the same thread
2561      having reached the end of its execution history again.
2562 
2563      In the worst case, this would starve the other threads.  But even if other
2564      threads would be allowed to make progress, this would result in far too
2565      many intermediate stops.
2566 
2567      We therefore delay the reporting of "no execution history" until we have
2568      nothing else to report.  By this time, all threads should have moved to
2569      either the beginning or the end of their execution history.  There will
2570      be a single user-visible stop.  */
2571   struct thread_info *eventing = NULL;
2572   while ((eventing == NULL) && !moving.empty ())
2573     {
2574       for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2575 	{
2576 	  thread_info *tp = moving[ix];
2577 
2578 	  *status = record_btrace_step_thread (tp);
2579 
2580 	  switch (status->kind)
2581 	    {
2582 	    case TARGET_WAITKIND_IGNORE:
2583 	      ix++;
2584 	      break;
2585 
2586 	    case TARGET_WAITKIND_NO_HISTORY:
2587 	      no_history.push_back (ordered_remove (moving, ix));
2588 	      break;
2589 
2590 	    default:
2591 	      eventing = unordered_remove (moving, ix);
2592 	      break;
2593 	    }
2594 	}
2595     }
2596 
2597   if (eventing == NULL)
2598     {
2599       /* We started with at least one moving thread.  This thread must have
2600 	 either stopped or reached the end of its execution history.
2601 
2602 	 In the former case, EVENTING must not be NULL.
2603 	 In the latter case, NO_HISTORY must not be empty.  */
2604       gdb_assert (!no_history.empty ());
2605 
2606       /* We kept threads moving at the end of their execution history.  Stop
2607 	 EVENTING now that we are going to report its stop.  */
2608       eventing = unordered_remove (no_history, 0);
2609       eventing->btrace.flags &= ~BTHR_MOVE;
2610 
2611       *status = btrace_step_no_history ();
2612     }
2613 
2614   gdb_assert (eventing != NULL);
2615 
2616   /* We kept threads replaying at the end of their execution history.  Stop
2617      replaying EVENTING now that we are going to report its stop.  */
2618   record_btrace_stop_replaying_at_end (eventing);
2619 
2620   /* Stop all other threads.  */
2621   if (!target_is_non_stop_p ())
2622     {
2623       for (thread_info *tp : all_non_exited_threads ())
2624 	record_btrace_cancel_resume (tp);
2625     }
2626 
2627   /* In async mode, we need to announce further events.  */
2628   if (target_is_async_p ())
2629     record_btrace_maybe_mark_async_event (moving, no_history);
2630 
2631   /* Start record histories anew from the current position.  */
2632   record_btrace_clear_histories (&eventing->btrace);
2633 
2634   /* We moved the replay position but did not update registers.  */
2635   registers_changed_thread (eventing);
2636 
2637   DEBUG ("wait ended by thread %s (%s): %s",
2638 	 print_thread_id (eventing),
2639 	 target_pid_to_str (eventing->ptid),
2640 	 target_waitstatus_to_string (status).c_str ());
2641 
2642   return eventing->ptid;
2643 }
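
/* The wait method above removes threads from its work lists in two ways:
   with ordered_remove for the no-history list (preserving relative
   order) and with unordered_remove for the eventing thread (order no
   longer matters).  An illustrative sketch of the assumed semantics of
   these two helpers; the example names are hypothetical.  */
#if 0
#include <vector>
#include <cstddef>

template<typename T>
T
example_ordered_remove (std::vector<T> &vec, std::size_t ix)
{
  T elem = vec[ix];
  vec.erase (vec.begin () + ix);	/* O(n), keeps order.  */
  return elem;
}

template<typename T>
T
example_unordered_remove (std::vector<T> &vec, std::size_t ix)
{
  T elem = vec[ix];
  vec[ix] = vec.back ();		/* O(1), swaps in the last.  */
  vec.pop_back ();
  return elem;
}
#endif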
2644 
2645 /* The stop method of target record-btrace.  */
2646 
2647 void
2648 record_btrace_target::stop (ptid_t ptid)
2649 {
2650   DEBUG ("stop %s", target_pid_to_str (ptid));
2651 
2652   /* As long as we're not replaying, just forward the request.  */
2653   if ((::execution_direction != EXEC_REVERSE)
2654       && !record_is_replaying (minus_one_ptid))
2655     {
2656       this->beneath ()->stop (ptid);
2657     }
2658   else
2659     {
2660       for (thread_info *tp : all_non_exited_threads (ptid))
2661 	{
2662 	  tp->btrace.flags &= ~BTHR_MOVE;
2663 	  tp->btrace.flags |= BTHR_STOP;
2664 	}
2665     }
2666 }
2667 
2668 /* The can_execute_reverse method of target record-btrace.  */
2669 
2670 bool
2671 record_btrace_target::can_execute_reverse ()
2672 {
2673   return true;
2674 }
2675 
2676 /* The stopped_by_sw_breakpoint method of target record-btrace.  */
2677 
2678 bool
2679 record_btrace_target::stopped_by_sw_breakpoint ()
2680 {
2681   if (record_is_replaying (minus_one_ptid))
2682     {
2683       struct thread_info *tp = inferior_thread ();
2684 
2685       return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2686     }
2687 
2688   return this->beneath ()->stopped_by_sw_breakpoint ();
2689 }
2690 
2691 /* The supports_stopped_by_sw_breakpoint method of target
2692    record-btrace.  */
2693 
2694 bool
2695 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2696 {
2697   if (record_is_replaying (minus_one_ptid))
2698     return true;
2699 
2700   return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2701 }
2702 
2703 /* The stopped_by_hw_breakpoint method of target record-btrace.  */
2704 
2705 bool
2706 record_btrace_target::stopped_by_hw_breakpoint ()
2707 {
2708   if (record_is_replaying (minus_one_ptid))
2709     {
2710       struct thread_info *tp = inferior_thread ();
2711 
2712       return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2713     }
2714 
2715   return this->beneath ()->stopped_by_hw_breakpoint ();
2716 }
2717 
2718 /* The supports_stopped_by_hw_breakpoint method of target
2719    record-btrace.  */
2720 
2721 bool
2722 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2723 {
2724   if (record_is_replaying (minus_one_ptid))
2725     return true;
2726 
2727   return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2728 }
2729 
2730 /* The update_thread_list method of target record-btrace.  */
2731 
2732 void
2733 record_btrace_target::update_thread_list ()
2734 {
2735   /* We don't add or remove threads during replay.  */
2736   if (record_is_replaying (minus_one_ptid))
2737     return;
2738 
2739   /* Forward the request.  */
2740   this->beneath ()->update_thread_list ();
2741 }
2742 
2743 /* The thread_alive method of target record-btrace.  */
2744 
2745 bool
2746 record_btrace_target::thread_alive (ptid_t ptid)
2747 {
2748   /* We don't add or remove threads during replay.  */
2749   if (record_is_replaying (minus_one_ptid))
2750     return true;
2751 
2752   /* Forward the request.  */
2753   return this->beneath ()->thread_alive (ptid);
2754 }
2755 
2756 /* Set the replay branch trace instruction iterator.  If IT is NULL, replay
2757    is stopped.  */
2758 
2759 static void
2760 record_btrace_set_replay (struct thread_info *tp,
2761 			  const struct btrace_insn_iterator *it)
2762 {
2763   struct btrace_thread_info *btinfo;
2764 
2765   btinfo = &tp->btrace;
2766 
2767   if (it == NULL)
2768     record_btrace_stop_replaying (tp);
2769   else
2770     {
2771       if (btinfo->replay == NULL)
2772 	record_btrace_start_replaying (tp);
2773       else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2774 	return;
2775 
2776       *btinfo->replay = *it;
2777       registers_changed_thread (tp);
2778     }
2779 
2780   /* Start anew from the new replay position.  */
2781   record_btrace_clear_histories (btinfo);
2782 
2783   inferior_thread ()->suspend.stop_pc
2784     = regcache_read_pc (get_current_regcache ());
2785   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2786 }
2787 
2788 /* The goto_record_begin method of target record-btrace.  */
2789 
2790 void
2791 record_btrace_target::goto_record_begin ()
2792 {
2793   struct thread_info *tp;
2794   struct btrace_insn_iterator begin;
2795 
2796   tp = require_btrace_thread ();
2797 
2798   btrace_insn_begin (&begin, &tp->btrace);
2799 
2800   /* Skip gaps at the beginning of the trace.  */
2801   while (btrace_insn_get (&begin) == NULL)
2802     {
2803       unsigned int steps;
2804 
2805       steps = btrace_insn_next (&begin, 1);
2806       if (steps == 0)
2807 	error (_("No trace."));
2808     }
2809 
2810   record_btrace_set_replay (tp, &begin);
2811 }
2812 
2813 /* The goto_record_end method of target record-btrace.  */
2814 
2815 void
2816 record_btrace_target::goto_record_end ()
2817 {
2818   struct thread_info *tp;
2819 
2820   tp = require_btrace_thread ();
2821 
2822   record_btrace_set_replay (tp, NULL);
2823 }
2824 
2825 /* The goto_record method of target record-btrace.  */
2826 
2827 void
2828 record_btrace_target::goto_record (ULONGEST insn)
2829 {
2830   struct thread_info *tp;
2831   struct btrace_insn_iterator it;
2832   unsigned int number;
2833   int found;
2834 
2835   number = insn;
2836 
2837   /* Check for wrap-arounds.  */
2838   if (number != insn)
2839     error (_("Instruction number out of range."));
2840 
2841   tp = require_btrace_thread ();
2842 
2843   found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2844 
2845   /* Check if the instruction could not be found or is a gap.  */
2846   if (found == 0 || btrace_insn_get (&it) == NULL)
2847     error (_("No such instruction."));
2848 
2849   record_btrace_set_replay (tp, &it);
2850 }
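
/* An illustrative standalone sketch of the wrap-around check above:
   narrowing ULONGEST to unsigned int and comparing back detects values
   that do not fit.  The function name and numbers are hypothetical.  */
#if 0
#include <cstdint>

static bool
example_fits_in_uint (std::uint64_t insn)
{
  unsigned int number = insn;	/* Possibly truncating conversion.  */
  return number == insn;	/* False if INSN was truncated.  */
}

/* With 32-bit unsigned int, example_fits_in_uint (0x100000000ULL)
   returns false, which corresponds to the "Instruction number out of
   range" error above.  */
#endif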
2851 
2852 /* The record_stop_replaying method of target record-btrace.  */
2853 
2854 void
2855 record_btrace_target::record_stop_replaying ()
2856 {
2857   for (thread_info *tp : all_non_exited_threads ())
2858     record_btrace_stop_replaying (tp);
2859 }
2860 
2861 /* The execution_direction target method.  */
2862 
2863 enum exec_direction_kind
2864 record_btrace_target::execution_direction ()
2865 {
2866   return record_btrace_resume_exec_dir;
2867 }
2868 
2869 /* The prepare_to_generate_core target method.  */
2870 
2871 void
2872 record_btrace_target::prepare_to_generate_core ()
2873 {
2874   record_btrace_generating_corefile = 1;
2875 }
2876 
2877 /* The done_generating_core target method.  */
2878 
2879 void
2880 record_btrace_target::done_generating_core ()
2881 {
2882   record_btrace_generating_corefile = 0;
2883 }
2884 
2885 /* Start recording in BTS format.  */
2886 
2887 static void
2888 cmd_record_btrace_bts_start (const char *args, int from_tty)
2889 {
2890   if (args != NULL && *args != 0)
2891     error (_("Invalid argument."));
2892 
2893   record_btrace_conf.format = BTRACE_FORMAT_BTS;
2894 
2895   TRY
2896     {
2897       execute_command ("target record-btrace", from_tty);
2898     }
2899   CATCH (exception, RETURN_MASK_ALL)
2900     {
2901       record_btrace_conf.format = BTRACE_FORMAT_NONE;
2902       throw_exception (exception);
2903     }
2904   END_CATCH
2905 }
2906 
2907 /* Start recording in Intel Processor Trace format.  */
2908 
2909 static void
2910 cmd_record_btrace_pt_start (const char *args, int from_tty)
2911 {
2912   if (args != NULL && *args != 0)
2913     error (_("Invalid argument."));
2914 
2915   record_btrace_conf.format = BTRACE_FORMAT_PT;
2916 
2917   TRY
2918     {
2919       execute_command ("target record-btrace", from_tty);
2920     }
2921   CATCH (exception, RETURN_MASK_ALL)
2922     {
2923       record_btrace_conf.format = BTRACE_FORMAT_NONE;
2924       throw_exception (exception);
2925     }
2926   END_CATCH
2927 }
2928 
2929 /* Alias for "target record".  */
2930 
2931 static void
2932 cmd_record_btrace_start (const char *args, int from_tty)
2933 {
2934   if (args != NULL && *args != 0)
2935     error (_("Invalid argument."));
2936 
2937   record_btrace_conf.format = BTRACE_FORMAT_PT;
2938 
2939   TRY
2940     {
2941       execute_command ("target record-btrace", from_tty);
2942     }
2943   CATCH (exception, RETURN_MASK_ALL)
2944     {
2945       record_btrace_conf.format = BTRACE_FORMAT_BTS;
2946 
2947       TRY
2948 	{
2949 	  execute_command ("target record-btrace", from_tty);
2950 	}
2951       CATCH (ex, RETURN_MASK_ALL)
2952 	{
2953 	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
2954 	  throw_exception (ex);
2955 	}
2956       END_CATCH
2957     }
2958   END_CATCH
2959 }
2960 
2961 /* The "set record btrace" command.  */
2962 
2963 static void
2964 cmd_set_record_btrace (const char *args, int from_tty)
2965 {
2966   printf_unfiltered (_("\"set record btrace\" must be followed "
2967 		       "by an appropriate subcommand.\n"));
2968   help_list (set_record_btrace_cmdlist, "set record btrace ",
2969 	     all_commands, gdb_stdout);
2970 }
2971 
2972 /* The "show record btrace" command.  */
2973 
2974 static void
2975 cmd_show_record_btrace (const char *args, int from_tty)
2976 {
2977   cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2978 }
2979 
2980 /* The "show record btrace replay-memory-access" command.  */
2981 
2982 static void
2983 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2984 			       struct cmd_list_element *c, const char *value)
2985 {
2986   fprintf_filtered (file, _("Replay memory access is %s.\n"),
2987 		    replay_memory_access);
2988 }
2989 
2990 /* The "set record btrace cpu none" command.  */
2991 
2992 static void
2993 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2994 {
2995   if (args != nullptr && *args != 0)
2996     error (_("Trailing junk: '%s'."), args);
2997 
2998   record_btrace_cpu_state = CS_NONE;
2999 }
3000 
3001 /* The "set record btrace cpu auto" command.  */
3002 
3003 static void
3004 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3005 {
3006   if (args != nullptr && *args != 0)
3007     error (_("Trailing junk: '%s'."), args);
3008 
3009   record_btrace_cpu_state = CS_AUTO;
3010 }
3011 
3012 /* The "set record btrace cpu" command.  */
3013 
3014 static void
3015 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3016 {
3017   if (args == nullptr)
3018     args = "";
3019 
3020   /* We use a hard-coded vendor string for now.  */
3021   unsigned int family, model, stepping;
3022   int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3023 				&model, &l1, &stepping, &l2);
3024   if (matches == 3)
3025     {
3026       if (strlen (args) != l2)
3027 	error (_("Trailing junk: '%s'."), args + l2);
3028     }
3029   else if (matches == 2)
3030     {
3031       if (strlen (args) != l1)
3032 	error (_("Trailing junk: '%s'."), args + l1);
3033 
3034       stepping = 0;
3035     }
3036   else
3037     error (_("Bad format.  See \"help set record btrace cpu\"."));
3038 
3039   if (USHRT_MAX < family)
3040     error (_("Cpu family too big."));
3041 
3042   if (UCHAR_MAX < model)
3043     error (_("Cpu model too big."));
3044 
3045   if (UCHAR_MAX < stepping)
3046     error (_("Cpu stepping too big."));
3047 
3048   record_btrace_cpu.vendor = CV_INTEL;
3049   record_btrace_cpu.family = family;
3050   record_btrace_cpu.model = model;
3051   record_btrace_cpu.stepping = stepping;
3052 
3053   record_btrace_cpu_state = CS_CPU;
3054 }
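
/* An illustrative standalone sketch of the "%n" technique above: each
   "%n" records how many characters sscanf consumed, so comparing against
   the argument length detects trailing junk.  Note that "%n" does not
   count towards the sscanf return value, hence MATCHES is 2 or 3.  The
   function name is hypothetical.  */
#if 0
#include <cstdio>
#include <cstring>

/* Return 1 if ARGS is exactly "intel: FAMILY/MODEL[/STEPPING]".  */
static int
example_parse_cpu (const char *args, unsigned int *family,
		   unsigned int *model, unsigned int *stepping)
{
  int l1 = 0, l2 = 0;
  int matches = sscanf (args, "intel: %u/%u%n/%u%n",
			family, model, &l1, stepping, &l2);

  if (matches == 3)
    return strlen (args) == (size_t) l2;
  if (matches == 2)
    {
      *stepping = 0;
      return strlen (args) == (size_t) l1;
    }
  return 0;
}
#endif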
3055 
3056 /* The "show record btrace cpu" command.  */
3057 
3058 static void
3059 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3060 {
3061   if (args != nullptr && *args != 0)
3062     error (_("Trailing junk: '%s'."), args);
3063 
3064   switch (record_btrace_cpu_state)
3065     {
3066     case CS_AUTO:
3067       printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3068       return;
3069 
3070     case CS_NONE:
3071       printf_unfiltered (_("btrace cpu is 'none'.\n"));
3072       return;
3073 
3074     case CS_CPU:
3075       switch (record_btrace_cpu.vendor)
3076 	{
3077 	case CV_INTEL:
3078 	  if (record_btrace_cpu.stepping == 0)
3079 	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3080 			       record_btrace_cpu.family,
3081 			       record_btrace_cpu.model);
3082 	  else
3083 	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3084 			       record_btrace_cpu.family,
3085 			       record_btrace_cpu.model,
3086 			       record_btrace_cpu.stepping);
3087 	  return;
3088 	}
3089     }
3090 
3091   error (_("Internal error: bad cpu state."));
3092 }
3093 
3094 /* The "set record btrace bts" command.  */
3095 
3096 static void
3097 cmd_set_record_btrace_bts (const char *args, int from_tty)
3098 {
3099   printf_unfiltered (_("\"set record btrace bts\" must be followed "
3100 		       "by an appropriate subcommand.\n"));
3101   help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3102 	     all_commands, gdb_stdout);
3103 }
3104 
3105 /* The "show record btrace bts" command.  */
3106 
3107 static void
3108 cmd_show_record_btrace_bts (const char *args, int from_tty)
3109 {
3110   cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3111 }
3112 
3113 /* The "set record btrace pt" command.  */
3114 
3115 static void
3116 cmd_set_record_btrace_pt (const char *args, int from_tty)
3117 {
3118   printf_unfiltered (_("\"set record btrace pt\" must be followed "
3119 		       "by an appropriate subcommand.\n"));
3120   help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3121 	     all_commands, gdb_stdout);
3122 }
3123 
3124 /* The "show record btrace pt" command.  */
3125 
3126 static void
3127 cmd_show_record_btrace_pt (const char *args, int from_tty)
3128 {
3129   cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3130 }
3131 
3132 /* The "record bts buffer-size" show value function.  */
3133 
3134 static void
3135 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3136 				   struct cmd_list_element *c,
3137 				   const char *value)
3138 {
3139   fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3140 		    value);
3141 }
3142 
3143 /* The "record pt buffer-size" show value function.  */
3144 
3145 static void
3146 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3147 				  struct cmd_list_element *c,
3148 				  const char *value)
3149 {
3150   fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3151 		    value);
3152 }
3153 
3154 /* Initialize btrace commands.  */
3155 
3156 void
3157 _initialize_record_btrace (void)
3158 {
3159   add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3160 		  _("Start branch trace recording."), &record_btrace_cmdlist,
3161 		  "record btrace ", 0, &record_cmdlist);
3162   add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3163 
3164   add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3165 	   _("\
3166 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3167 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3168 This format may not be available on all processors."),
3169 	   &record_btrace_cmdlist);
3170   add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3171 
3172   add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3173 	   _("\
3174 Start branch trace recording in Intel Processor Trace format.\n\n\
3175 This format may not be available on all processors."),
3176 	   &record_btrace_cmdlist);
3177   add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3178 
3179   add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3180 		  _("Set record options."), &set_record_btrace_cmdlist,
3181 		  "set record btrace ", 0, &set_record_cmdlist);
3182 
3183   add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3184 		  _("Show record options."), &show_record_btrace_cmdlist,
3185 		  "show record btrace ", 0, &show_record_cmdlist);
3186 
3187   add_setshow_enum_cmd ("replay-memory-access", no_class,
3188 			replay_memory_access_types, &replay_memory_access, _("\
3189 Set what memory accesses are allowed during replay."), _("\
3190 Show what memory accesses are allowed during replay."),
3191 			   _("Default is READ-ONLY.\n\n\
3192 The btrace record target does not trace data.\n\
3193 The memory therefore corresponds to the live target and not \
3194 to the current replay position.\n\n\
3195 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3196 When READ-WRITE, allow accesses to read-only and read-write memory during \
3197 replay."),
3198 			   NULL, cmd_show_replay_memory_access,
3199 			   &set_record_btrace_cmdlist,
3200 			   &show_record_btrace_cmdlist);
3201 
3202   add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3203 		  _("\
3204 Set the cpu to be used for trace decode.\n\n\
3205 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3206 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3207 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3208 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3209 When GDB does not support that cpu, this option can be used to enable\n\
3210 workarounds for a similar cpu that GDB supports.\n\n\
3211 When set to \"none\", errata workarounds are disabled."),
3212 		  &set_record_btrace_cpu_cmdlist,
3213 		  "set record btrace cpu ", 1,
3214 		  &set_record_btrace_cmdlist);
3215 
3216   add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3217 Automatically determine the cpu to be used for trace decode."),
3218 	   &set_record_btrace_cpu_cmdlist);
3219 
3220   add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3221 Do not enable errata workarounds for trace decode."),
3222 	   &set_record_btrace_cpu_cmdlist);
3223 
3224   add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3225 Show the cpu to be used for trace decode."),
3226 	   &show_record_btrace_cmdlist);
3227 
3228   add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3229 		  _("Set record btrace bts options."),
3230 		  &set_record_btrace_bts_cmdlist,
3231 		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3232 
3233   add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3234 		  _("Show record btrace bts options."),
3235 		  &show_record_btrace_bts_cmdlist,
3236 		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3237 
3238   add_setshow_uinteger_cmd ("buffer-size", no_class,
3239 			    &record_btrace_conf.bts.size,
3240 			    _("Set the record/replay bts buffer size."),
3241 			    _("Show the record/replay bts buffer size."), _("\
3242 When starting recording request a trace buffer of this size.  \
3243 The actual buffer size may differ from the requested size.  \
3244 Use \"info record\" to see the actual buffer size.\n\n\
3245 Bigger buffers allow longer recording but also take more time to process \
3246 the recorded execution trace.\n\n\
3247 The trace buffer size may not be changed while recording."), NULL,
3248 			    show_record_bts_buffer_size_value,
3249 			    &set_record_btrace_bts_cmdlist,
3250 			    &show_record_btrace_bts_cmdlist);
3251 
3252   add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3253 		  _("Set record btrace pt options."),
3254 		  &set_record_btrace_pt_cmdlist,
3255 		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3256 
3257   add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3258 		  _("Show record btrace pt options."),
3259 		  &show_record_btrace_pt_cmdlist,
3260 		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3261 
3262   add_setshow_uinteger_cmd ("buffer-size", no_class,
3263 			    &record_btrace_conf.pt.size,
3264 			    _("Set the record/replay pt buffer size."),
3265 			    _("Show the record/replay pt buffer size."), _("\
3266 Bigger buffers allow longer recording but also take more time to process \
3267 the recorded execution.\n\
3268 The actual buffer size may differ from the requested size.  Use \"info record\" \
3269 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3270 			    &set_record_btrace_pt_cmdlist,
3271 			    &show_record_btrace_pt_cmdlist);
3272 
3273   add_target (record_btrace_target_info, record_btrace_target_open);
3274 
3275   bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3276 			       xcalloc, xfree);
3277 
3278   record_btrace_conf.bts.size = 64 * 1024;
3279   record_btrace_conf.pt.size = 16 * 1024;
3280 }
3281
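
/* Example session using the commands registered above (illustrative;
   exact output depends on the target):

     (gdb) set record btrace pt buffer-size 32768
     (gdb) set record btrace cpu intel: 6/62
     (gdb) record btrace pt
     (gdb) info record
     (gdb) record goto begin

   Per cmd_record_btrace_start above, plain "record btrace" tries the pt
   format first and falls back to bts if pt is not supported.  */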