1 /* Branch trace support for GDB, the GNU debugger. 2 3 Copyright (C) 2013-2023 Free Software Foundation, Inc. 4 5 Contributed by Intel Corp. <markus.t.metzger@intel.com> 6 7 This file is part of GDB. 8 9 This program is free software; you can redistribute it and/or modify 10 it under the terms of the GNU General Public License as published by 11 the Free Software Foundation; either version 3 of the License, or 12 (at your option) any later version. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 21 22 #include "defs.h" 23 #include "record.h" 24 #include "record-btrace.h" 25 #include "gdbthread.h" 26 #include "target.h" 27 #include "gdbcmd.h" 28 #include "disasm.h" 29 #include "observable.h" 30 #include "cli/cli-utils.h" 31 #include "source.h" 32 #include "ui-out.h" 33 #include "symtab.h" 34 #include "filenames.h" 35 #include "regcache.h" 36 #include "frame-unwind.h" 37 #include "hashtab.h" 38 #include "infrun.h" 39 #include "gdbsupport/event-loop.h" 40 #include "inf-loop.h" 41 #include "inferior.h" 42 #include <algorithm> 43 #include "gdbarch.h" 44 #include "cli/cli-style.h" 45 #include "async-event.h" 46 #include <forward_list> 47 48 static const target_info record_btrace_target_info = { 49 "record-btrace", 50 N_("Branch tracing target"), 51 N_("Collect control-flow trace and provide the execution history.") 52 }; 53 54 /* The target_ops of record-btrace. */ 55 56 class record_btrace_target final : public target_ops 57 { 58 public: 59 const target_info &info () const override 60 { return record_btrace_target_info; } 61 62 strata stratum () const override { return record_stratum; } 63 64 void close () override; 65 void async (bool) override; 66 67 void detach (inferior *inf, int from_tty) override 68 { record_detach (this, inf, from_tty); } 69 70 void disconnect (const char *, int) override; 71 72 void mourn_inferior () override 73 { record_mourn_inferior (this); } 74 75 void kill () override 76 { record_kill (this); } 77 78 enum record_method record_method (ptid_t ptid) override; 79 80 void stop_recording () override; 81 void info_record () override; 82 83 void insn_history (int size, gdb_disassembly_flags flags) override; 84 void insn_history_from (ULONGEST from, int size, 85 gdb_disassembly_flags flags) override; 86 void insn_history_range (ULONGEST begin, ULONGEST end, 87 gdb_disassembly_flags flags) override; 88 void call_history (int size, record_print_flags flags) override; 89 void call_history_from (ULONGEST begin, int size, record_print_flags flags) 90 override; 91 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags) 92 override; 93 94 bool record_is_replaying (ptid_t ptid) override; 95 bool record_will_replay (ptid_t ptid, int dir) override; 96 void record_stop_replaying () override; 97 98 enum target_xfer_status xfer_partial (enum target_object object, 99 const char *annex, 100 gdb_byte *readbuf, 101 const gdb_byte *writebuf, 102 ULONGEST offset, ULONGEST len, 103 ULONGEST *xfered_len) override; 104 105 int insert_breakpoint (struct gdbarch *, 106 struct bp_target_info *) override; 107 int remove_breakpoint (struct gdbarch *, struct bp_target_info *, 108 enum remove_bp_reason) override; 109 110 void fetch_registers (struct regcache *, int) override; 111 112 void store_registers (struct regcache *, int) override; 113 void prepare_to_store (struct regcache *) override; 114 115 const struct frame_unwind *get_unwinder () override; 116 117 const struct frame_unwind *get_tailcall_unwinder () override; 118 119 void resume (ptid_t, int, enum gdb_signal) override; 120 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override; 121 122 void stop (ptid_t) override; 123 void update_thread_list () override; 124 bool thread_alive (ptid_t ptid) override; 125 void goto_record_begin () override; 126 void goto_record_end () override; 127 void goto_record (ULONGEST insn) override; 128 129 bool can_execute_reverse () override; 130 131 bool stopped_by_sw_breakpoint () override; 132 bool supports_stopped_by_sw_breakpoint () override; 133 134 bool stopped_by_hw_breakpoint () override; 135 bool supports_stopped_by_hw_breakpoint () override; 136 137 enum exec_direction_kind execution_direction () override; 138 void prepare_to_generate_core () override; 139 void done_generating_core () override; 140 }; 141 142 static record_btrace_target record_btrace_ops; 143 144 /* Initialize the record-btrace target ops. */ 145 146 /* Token associated with a new-thread observer enabling branch tracing 147 for the new thread. */ 148 static const gdb::observers::token record_btrace_thread_observer_token {}; 149 150 /* Memory access types used in set/show record btrace replay-memory-access. */ 151 static const char replay_memory_access_read_only[] = "read-only"; 152 static const char replay_memory_access_read_write[] = "read-write"; 153 static const char *const replay_memory_access_types[] = 154 { 155 replay_memory_access_read_only, 156 replay_memory_access_read_write, 157 NULL 158 }; 159 160 /* The currently allowed replay memory access type. */ 161 static const char *replay_memory_access = replay_memory_access_read_only; 162 163 /* The cpu state kinds. */ 164 enum record_btrace_cpu_state_kind 165 { 166 CS_AUTO, 167 CS_NONE, 168 CS_CPU 169 }; 170 171 /* The current cpu state. */ 172 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO; 173 174 /* The current cpu for trace decode. */ 175 static struct btrace_cpu record_btrace_cpu; 176 177 /* Command lists for "set/show record btrace". */ 178 static struct cmd_list_element *set_record_btrace_cmdlist; 179 static struct cmd_list_element *show_record_btrace_cmdlist; 180 181 /* The execution direction of the last resume we got. See record-full.c. */ 182 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD; 183 184 /* The async event handler for reverse/replay execution. */ 185 static struct async_event_handler *record_btrace_async_inferior_event_handler; 186 187 /* A flag indicating that we are currently generating a core file. */ 188 static int record_btrace_generating_corefile; 189 190 /* The current branch trace configuration. */ 191 static struct btrace_config record_btrace_conf; 192 193 /* Command list for "record btrace". */ 194 static struct cmd_list_element *record_btrace_cmdlist; 195 196 /* Command lists for "set/show record btrace bts". */ 197 static struct cmd_list_element *set_record_btrace_bts_cmdlist; 198 static struct cmd_list_element *show_record_btrace_bts_cmdlist; 199 200 /* Command lists for "set/show record btrace pt". */ 201 static struct cmd_list_element *set_record_btrace_pt_cmdlist; 202 static struct cmd_list_element *show_record_btrace_pt_cmdlist; 203 204 /* Command list for "set record btrace cpu". */ 205 static struct cmd_list_element *set_record_btrace_cpu_cmdlist; 206 207 /* Print a record-btrace debug message. Use do ... while (0) to avoid 208 ambiguities when used in if statements. */ 209 210 #define DEBUG(msg, args...) \ 211 do \ 212 { \ 213 if (record_debug != 0) \ 214 gdb_printf (gdb_stdlog, \ 215 "[record-btrace] " msg "\n", ##args); \ 216 } \ 217 while (0) 218 219 220 /* Return the cpu configured by the user. Returns NULL if the cpu was 221 configured as auto. */ 222 const struct btrace_cpu * 223 record_btrace_get_cpu (void) 224 { 225 switch (record_btrace_cpu_state) 226 { 227 case CS_AUTO: 228 return nullptr; 229 230 case CS_NONE: 231 record_btrace_cpu.vendor = CV_UNKNOWN; 232 /* Fall through. */ 233 case CS_CPU: 234 return &record_btrace_cpu; 235 } 236 237 error (_("Internal error: bad record btrace cpu state.")); 238 } 239 240 /* Update the branch trace for the current thread and return a pointer to its 241 thread_info. 242 243 Throws an error if there is no thread or no trace. This function never 244 returns NULL. */ 245 246 static struct thread_info * 247 require_btrace_thread (void) 248 { 249 DEBUG ("require"); 250 251 if (inferior_ptid == null_ptid) 252 error (_("No thread.")); 253 254 thread_info *tp = inferior_thread (); 255 256 validate_registers_access (); 257 258 btrace_fetch (tp, record_btrace_get_cpu ()); 259 260 if (btrace_is_empty (tp)) 261 error (_("No trace.")); 262 263 return tp; 264 } 265 266 /* Update the branch trace for the current thread and return a pointer to its 267 branch trace information struct. 268 269 Throws an error if there is no thread or no trace. This function never 270 returns NULL. */ 271 272 static struct btrace_thread_info * 273 require_btrace (void) 274 { 275 struct thread_info *tp; 276 277 tp = require_btrace_thread (); 278 279 return &tp->btrace; 280 } 281 282 /* The new thread observer. */ 283 284 static void 285 record_btrace_on_new_thread (struct thread_info *tp) 286 { 287 /* Ignore this thread if its inferior is not recorded by us. */ 288 target_ops *rec = tp->inf->target_at (record_stratum); 289 if (rec != &record_btrace_ops) 290 return; 291 292 try 293 { 294 btrace_enable (tp, &record_btrace_conf); 295 } 296 catch (const gdb_exception_error &error) 297 { 298 warning ("%s", error.what ()); 299 } 300 } 301 302 /* Enable automatic tracing of new threads. */ 303 304 static void 305 record_btrace_auto_enable (void) 306 { 307 DEBUG ("attach thread observer"); 308 309 gdb::observers::new_thread.attach (record_btrace_on_new_thread, 310 record_btrace_thread_observer_token, 311 "record-btrace"); 312 } 313 314 /* Disable automatic tracing of new threads. */ 315 316 static void 317 record_btrace_auto_disable (void) 318 { 319 DEBUG ("detach thread observer"); 320 321 gdb::observers::new_thread.detach (record_btrace_thread_observer_token); 322 } 323 324 /* The record-btrace async event handler function. */ 325 326 static void 327 record_btrace_handle_async_inferior_event (gdb_client_data data) 328 { 329 inferior_event_handler (INF_REG_EVENT); 330 } 331 332 /* See record-btrace.h. */ 333 334 void 335 record_btrace_push_target (void) 336 { 337 const char *format; 338 339 record_btrace_auto_enable (); 340 341 current_inferior ()->push_target (&record_btrace_ops); 342 343 record_btrace_async_inferior_event_handler 344 = create_async_event_handler (record_btrace_handle_async_inferior_event, 345 NULL, "record-btrace"); 346 record_btrace_generating_corefile = 0; 347 348 format = btrace_format_short_string (record_btrace_conf.format); 349 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format); 350 } 351 352 /* Disable btrace on a set of threads on scope exit. */ 353 354 struct scoped_btrace_disable 355 { 356 scoped_btrace_disable () = default; 357 358 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable); 359 360 ~scoped_btrace_disable () 361 { 362 for (thread_info *tp : m_threads) 363 btrace_disable (tp); 364 } 365 366 void add_thread (thread_info *thread) 367 { 368 m_threads.push_front (thread); 369 } 370 371 void discard () 372 { 373 m_threads.clear (); 374 } 375 376 private: 377 std::forward_list<thread_info *> m_threads; 378 }; 379 380 /* Open target record-btrace. */ 381 382 static void 383 record_btrace_target_open (const char *args, int from_tty) 384 { 385 /* If we fail to enable btrace for one thread, disable it for the threads for 386 which it was successfully enabled. */ 387 scoped_btrace_disable btrace_disable; 388 389 DEBUG ("open"); 390 391 record_preopen (); 392 393 if (!target_has_execution ()) 394 error (_("The program is not being run.")); 395 396 for (thread_info *tp : current_inferior ()->non_exited_threads ()) 397 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num)) 398 { 399 btrace_enable (tp, &record_btrace_conf); 400 401 btrace_disable.add_thread (tp); 402 } 403 404 record_btrace_push_target (); 405 406 btrace_disable.discard (); 407 } 408 409 /* The stop_recording method of target record-btrace. */ 410 411 void 412 record_btrace_target::stop_recording () 413 { 414 DEBUG ("stop recording"); 415 416 record_btrace_auto_disable (); 417 418 for (thread_info *tp : current_inferior ()->non_exited_threads ()) 419 if (tp->btrace.target != NULL) 420 btrace_disable (tp); 421 } 422 423 /* The disconnect method of target record-btrace. */ 424 425 void 426 record_btrace_target::disconnect (const char *args, 427 int from_tty) 428 { 429 struct target_ops *beneath = this->beneath (); 430 431 /* Do not stop recording, just clean up GDB side. */ 432 current_inferior ()->unpush_target (this); 433 434 /* Forward disconnect. */ 435 beneath->disconnect (args, from_tty); 436 } 437 438 /* The close method of target record-btrace. */ 439 440 void 441 record_btrace_target::close () 442 { 443 if (record_btrace_async_inferior_event_handler != NULL) 444 delete_async_event_handler (&record_btrace_async_inferior_event_handler); 445 446 /* Make sure automatic recording gets disabled even if we did not stop 447 recording before closing the record-btrace target. */ 448 record_btrace_auto_disable (); 449 450 /* We should have already stopped recording. 451 Tear down btrace in case we have not. */ 452 for (thread_info *tp : current_inferior ()->non_exited_threads ()) 453 btrace_teardown (tp); 454 } 455 456 /* The async method of target record-btrace. */ 457 458 void 459 record_btrace_target::async (bool enable) 460 { 461 if (enable) 462 mark_async_event_handler (record_btrace_async_inferior_event_handler); 463 else 464 clear_async_event_handler (record_btrace_async_inferior_event_handler); 465 466 this->beneath ()->async (enable); 467 } 468 469 /* Adjusts the size and returns a human readable size suffix. */ 470 471 static const char * 472 record_btrace_adjust_size (unsigned int *size) 473 { 474 unsigned int sz; 475 476 sz = *size; 477 478 if ((sz & ((1u << 30) - 1)) == 0) 479 { 480 *size = sz >> 30; 481 return "GB"; 482 } 483 else if ((sz & ((1u << 20) - 1)) == 0) 484 { 485 *size = sz >> 20; 486 return "MB"; 487 } 488 else if ((sz & ((1u << 10) - 1)) == 0) 489 { 490 *size = sz >> 10; 491 return "kB"; 492 } 493 else 494 return ""; 495 } 496 497 /* Print a BTS configuration. */ 498 499 static void 500 record_btrace_print_bts_conf (const struct btrace_config_bts *conf) 501 { 502 const char *suffix; 503 unsigned int size; 504 505 size = conf->size; 506 if (size > 0) 507 { 508 suffix = record_btrace_adjust_size (&size); 509 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix); 510 } 511 } 512 513 /* Print an Intel Processor Trace configuration. */ 514 515 static void 516 record_btrace_print_pt_conf (const struct btrace_config_pt *conf) 517 { 518 const char *suffix; 519 unsigned int size; 520 521 size = conf->size; 522 if (size > 0) 523 { 524 suffix = record_btrace_adjust_size (&size); 525 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix); 526 } 527 } 528 529 /* Print a branch tracing configuration. */ 530 531 static void 532 record_btrace_print_conf (const struct btrace_config *conf) 533 { 534 gdb_printf (_("Recording format: %s.\n"), 535 btrace_format_string (conf->format)); 536 537 switch (conf->format) 538 { 539 case BTRACE_FORMAT_NONE: 540 return; 541 542 case BTRACE_FORMAT_BTS: 543 record_btrace_print_bts_conf (&conf->bts); 544 return; 545 546 case BTRACE_FORMAT_PT: 547 record_btrace_print_pt_conf (&conf->pt); 548 return; 549 } 550 551 internal_error (_("Unknown branch trace format.")); 552 } 553 554 /* The info_record method of target record-btrace. */ 555 556 void 557 record_btrace_target::info_record () 558 { 559 struct btrace_thread_info *btinfo; 560 const struct btrace_config *conf; 561 struct thread_info *tp; 562 unsigned int insns, calls, gaps; 563 564 DEBUG ("info"); 565 566 if (inferior_ptid == null_ptid) 567 error (_("No thread.")); 568 569 tp = inferior_thread (); 570 571 validate_registers_access (); 572 573 btinfo = &tp->btrace; 574 575 conf = ::btrace_conf (btinfo); 576 if (conf != NULL) 577 record_btrace_print_conf (conf); 578 579 btrace_fetch (tp, record_btrace_get_cpu ()); 580 581 insns = 0; 582 calls = 0; 583 gaps = 0; 584 585 if (!btrace_is_empty (tp)) 586 { 587 struct btrace_call_iterator call; 588 struct btrace_insn_iterator insn; 589 590 btrace_call_end (&call, btinfo); 591 btrace_call_prev (&call, 1); 592 calls = btrace_call_number (&call); 593 594 btrace_insn_end (&insn, btinfo); 595 insns = btrace_insn_number (&insn); 596 597 /* If the last instruction is not a gap, it is the current instruction 598 that is not actually part of the record. */ 599 if (btrace_insn_get (&insn) != NULL) 600 insns -= 1; 601 602 gaps = btinfo->ngaps; 603 } 604 605 gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) " 606 "for thread %s (%s).\n"), insns, calls, gaps, 607 print_thread_id (tp), 608 target_pid_to_str (tp->ptid).c_str ()); 609 610 if (btrace_is_replaying (tp)) 611 gdb_printf (_("Replay in progress. At instruction %u.\n"), 612 btrace_insn_number (btinfo->replay)); 613 } 614 615 /* Print a decode error. */ 616 617 static void 618 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode, 619 enum btrace_format format) 620 { 621 const char *errstr = btrace_decode_error (format, errcode); 622 623 uiout->text (_("[")); 624 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */ 625 if (!(format == BTRACE_FORMAT_PT && errcode > 0)) 626 { 627 uiout->text (_("decode error (")); 628 uiout->field_signed ("errcode", errcode); 629 uiout->text (_("): ")); 630 } 631 uiout->text (errstr); 632 uiout->text (_("]\n")); 633 } 634 635 /* A range of source lines. */ 636 637 struct btrace_line_range 638 { 639 /* The symtab this line is from. */ 640 struct symtab *symtab; 641 642 /* The first line (inclusive). */ 643 int begin; 644 645 /* The last line (exclusive). */ 646 int end; 647 }; 648 649 /* Construct a line range. */ 650 651 static struct btrace_line_range 652 btrace_mk_line_range (struct symtab *symtab, int begin, int end) 653 { 654 struct btrace_line_range range; 655 656 range.symtab = symtab; 657 range.begin = begin; 658 range.end = end; 659 660 return range; 661 } 662 663 /* Add a line to a line range. */ 664 665 static struct btrace_line_range 666 btrace_line_range_add (struct btrace_line_range range, int line) 667 { 668 if (range.end <= range.begin) 669 { 670 /* This is the first entry. */ 671 range.begin = line; 672 range.end = line + 1; 673 } 674 else if (line < range.begin) 675 range.begin = line; 676 else if (range.end < line) 677 range.end = line; 678 679 return range; 680 } 681 682 /* Return non-zero if RANGE is empty, zero otherwise. */ 683 684 static int 685 btrace_line_range_is_empty (struct btrace_line_range range) 686 { 687 return range.end <= range.begin; 688 } 689 690 /* Return non-zero if LHS contains RHS, zero otherwise. */ 691 692 static int 693 btrace_line_range_contains_range (struct btrace_line_range lhs, 694 struct btrace_line_range rhs) 695 { 696 return ((lhs.symtab == rhs.symtab) 697 && (lhs.begin <= rhs.begin) 698 && (rhs.end <= lhs.end)); 699 } 700 701 /* Find the line range associated with PC. */ 702 703 static struct btrace_line_range 704 btrace_find_line_range (CORE_ADDR pc) 705 { 706 struct btrace_line_range range; 707 struct linetable_entry *lines; 708 struct linetable *ltable; 709 struct symtab *symtab; 710 int nlines, i; 711 712 symtab = find_pc_line_symtab (pc); 713 if (symtab == NULL) 714 return btrace_mk_line_range (NULL, 0, 0); 715 716 ltable = symtab->linetable (); 717 if (ltable == NULL) 718 return btrace_mk_line_range (symtab, 0, 0); 719 720 nlines = ltable->nitems; 721 lines = ltable->item; 722 if (nlines <= 0) 723 return btrace_mk_line_range (symtab, 0, 0); 724 725 range = btrace_mk_line_range (symtab, 0, 0); 726 for (i = 0; i < nlines - 1; i++) 727 { 728 /* The test of is_stmt here was added when the is_stmt field was 729 introduced to the 'struct linetable_entry' structure. This 730 ensured that this loop maintained the same behaviour as before we 731 introduced is_stmt. That said, it might be that we would be 732 better off not checking is_stmt here, this would lead to us 733 possibly adding more line numbers to the range. At the time this 734 change was made I was unsure how to test this so chose to go with 735 maintaining the existing experience. */ 736 if ((lines[i].pc == pc) && (lines[i].line != 0) 737 && (lines[i].is_stmt == 1)) 738 range = btrace_line_range_add (range, lines[i].line); 739 } 740 741 return range; 742 } 743 744 /* Print source lines in LINES to UIOUT. 745 746 UI_ITEM_CHAIN is a cleanup chain for the last source line and the 747 instructions corresponding to that source line. When printing a new source 748 line, we do the cleanups for the open chain and open a new cleanup chain for 749 the new source line. If the source line range in LINES is not empty, this 750 function will leave the cleanup chain for the last printed source line open 751 so instructions can be added to it. */ 752 753 static void 754 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout, 755 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple, 756 gdb::optional<ui_out_emit_list> *asm_list, 757 gdb_disassembly_flags flags) 758 { 759 print_source_lines_flags psl_flags; 760 761 if (flags & DISASSEMBLY_FILENAME) 762 psl_flags |= PRINT_SOURCE_LINES_FILENAME; 763 764 for (int line = lines.begin; line < lines.end; ++line) 765 { 766 asm_list->reset (); 767 768 src_and_asm_tuple->emplace (uiout, "src_and_asm_line"); 769 770 print_source_lines (lines.symtab, line, line + 1, psl_flags); 771 772 asm_list->emplace (uiout, "line_asm_insn"); 773 } 774 } 775 776 /* Disassemble a section of the recorded instruction trace. */ 777 778 static void 779 btrace_insn_history (struct ui_out *uiout, 780 const struct btrace_thread_info *btinfo, 781 const struct btrace_insn_iterator *begin, 782 const struct btrace_insn_iterator *end, 783 gdb_disassembly_flags flags) 784 { 785 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags, 786 btrace_insn_number (begin), btrace_insn_number (end)); 787 788 flags |= DISASSEMBLY_SPECULATIVE; 789 790 struct gdbarch *gdbarch = target_gdbarch (); 791 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0); 792 793 ui_out_emit_list list_emitter (uiout, "asm_insns"); 794 795 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple; 796 gdb::optional<ui_out_emit_list> asm_list; 797 798 gdb_pretty_print_disassembler disasm (gdbarch, uiout); 799 800 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0; 801 btrace_insn_next (&it, 1)) 802 { 803 const struct btrace_insn *insn; 804 805 insn = btrace_insn_get (&it); 806 807 /* A NULL instruction indicates a gap in the trace. */ 808 if (insn == NULL) 809 { 810 const struct btrace_config *conf; 811 812 conf = btrace_conf (btinfo); 813 814 /* We have trace so we must have a configuration. */ 815 gdb_assert (conf != NULL); 816 817 uiout->field_fmt ("insn-number", "%u", 818 btrace_insn_number (&it)); 819 uiout->text ("\t"); 820 821 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it), 822 conf->format); 823 } 824 else 825 { 826 struct disasm_insn dinsn; 827 828 if ((flags & DISASSEMBLY_SOURCE) != 0) 829 { 830 struct btrace_line_range lines; 831 832 lines = btrace_find_line_range (insn->pc); 833 if (!btrace_line_range_is_empty (lines) 834 && !btrace_line_range_contains_range (last_lines, lines)) 835 { 836 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list, 837 flags); 838 last_lines = lines; 839 } 840 else if (!src_and_asm_tuple.has_value ()) 841 { 842 gdb_assert (!asm_list.has_value ()); 843 844 src_and_asm_tuple.emplace (uiout, "src_and_asm_line"); 845 846 /* No source information. */ 847 asm_list.emplace (uiout, "line_asm_insn"); 848 } 849 850 gdb_assert (src_and_asm_tuple.has_value ()); 851 gdb_assert (asm_list.has_value ()); 852 } 853 854 memset (&dinsn, 0, sizeof (dinsn)); 855 dinsn.number = btrace_insn_number (&it); 856 dinsn.addr = insn->pc; 857 858 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0) 859 dinsn.is_speculative = 1; 860 861 disasm.pretty_print_insn (&dinsn, flags); 862 } 863 } 864 } 865 866 /* The insn_history method of target record-btrace. */ 867 868 void 869 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags) 870 { 871 struct btrace_thread_info *btinfo; 872 struct btrace_insn_history *history; 873 struct btrace_insn_iterator begin, end; 874 struct ui_out *uiout; 875 unsigned int context, covered; 876 877 uiout = current_uiout; 878 ui_out_emit_tuple tuple_emitter (uiout, "insn history"); 879 context = abs (size); 880 if (context == 0) 881 error (_("Bad record instruction-history-size.")); 882 883 btinfo = require_btrace (); 884 history = btinfo->insn_history; 885 if (history == NULL) 886 { 887 struct btrace_insn_iterator *replay; 888 889 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size); 890 891 /* If we're replaying, we start at the replay position. Otherwise, we 892 start at the tail of the trace. */ 893 replay = btinfo->replay; 894 if (replay != NULL) 895 begin = *replay; 896 else 897 btrace_insn_end (&begin, btinfo); 898 899 /* We start from here and expand in the requested direction. Then we 900 expand in the other direction, as well, to fill up any remaining 901 context. */ 902 end = begin; 903 if (size < 0) 904 { 905 /* We want the current position covered, as well. */ 906 covered = btrace_insn_next (&end, 1); 907 covered += btrace_insn_prev (&begin, context - covered); 908 covered += btrace_insn_next (&end, context - covered); 909 } 910 else 911 { 912 covered = btrace_insn_next (&end, context); 913 covered += btrace_insn_prev (&begin, context - covered); 914 } 915 } 916 else 917 { 918 begin = history->begin; 919 end = history->end; 920 921 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size, 922 btrace_insn_number (&begin), btrace_insn_number (&end)); 923 924 if (size < 0) 925 { 926 end = begin; 927 covered = btrace_insn_prev (&begin, context); 928 } 929 else 930 { 931 begin = end; 932 covered = btrace_insn_next (&end, context); 933 } 934 } 935 936 if (covered > 0) 937 btrace_insn_history (uiout, btinfo, &begin, &end, flags); 938 else 939 { 940 if (size < 0) 941 gdb_printf (_("At the start of the branch trace record.\n")); 942 else 943 gdb_printf (_("At the end of the branch trace record.\n")); 944 } 945 946 btrace_set_insn_history (btinfo, &begin, &end); 947 } 948 949 /* The insn_history_range method of target record-btrace. */ 950 951 void 952 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to, 953 gdb_disassembly_flags flags) 954 { 955 struct btrace_thread_info *btinfo; 956 struct btrace_insn_iterator begin, end; 957 struct ui_out *uiout; 958 unsigned int low, high; 959 int found; 960 961 uiout = current_uiout; 962 ui_out_emit_tuple tuple_emitter (uiout, "insn history"); 963 low = from; 964 high = to; 965 966 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high); 967 968 /* Check for wrap-arounds. */ 969 if (low != from || high != to) 970 error (_("Bad range.")); 971 972 if (high < low) 973 error (_("Bad range.")); 974 975 btinfo = require_btrace (); 976 977 found = btrace_find_insn_by_number (&begin, btinfo, low); 978 if (found == 0) 979 error (_("Range out of bounds.")); 980 981 found = btrace_find_insn_by_number (&end, btinfo, high); 982 if (found == 0) 983 { 984 /* Silently truncate the range. */ 985 btrace_insn_end (&end, btinfo); 986 } 987 else 988 { 989 /* We want both begin and end to be inclusive. */ 990 btrace_insn_next (&end, 1); 991 } 992 993 btrace_insn_history (uiout, btinfo, &begin, &end, flags); 994 btrace_set_insn_history (btinfo, &begin, &end); 995 } 996 997 /* The insn_history_from method of target record-btrace. */ 998 999 void 1000 record_btrace_target::insn_history_from (ULONGEST from, int size, 1001 gdb_disassembly_flags flags) 1002 { 1003 ULONGEST begin, end, context; 1004 1005 context = abs (size); 1006 if (context == 0) 1007 error (_("Bad record instruction-history-size.")); 1008 1009 if (size < 0) 1010 { 1011 end = from; 1012 1013 if (from < context) 1014 begin = 0; 1015 else 1016 begin = from - context + 1; 1017 } 1018 else 1019 { 1020 begin = from; 1021 end = from + context - 1; 1022 1023 /* Check for wrap-around. */ 1024 if (end < begin) 1025 end = ULONGEST_MAX; 1026 } 1027 1028 insn_history_range (begin, end, flags); 1029 } 1030 1031 /* Print the instruction number range for a function call history line. */ 1032 1033 static void 1034 btrace_call_history_insn_range (struct ui_out *uiout, 1035 const struct btrace_function *bfun) 1036 { 1037 unsigned int begin, end, size; 1038 1039 size = bfun->insn.size (); 1040 gdb_assert (size > 0); 1041 1042 begin = bfun->insn_offset; 1043 end = begin + size - 1; 1044 1045 uiout->field_unsigned ("insn begin", begin); 1046 uiout->text (","); 1047 uiout->field_unsigned ("insn end", end); 1048 } 1049 1050 /* Compute the lowest and highest source line for the instructions in BFUN 1051 and return them in PBEGIN and PEND. 1052 Ignore instructions that can't be mapped to BFUN, e.g. instructions that 1053 result from inlining or macro expansion. */ 1054 1055 static void 1056 btrace_compute_src_line_range (const struct btrace_function *bfun, 1057 int *pbegin, int *pend) 1058 { 1059 struct symtab *symtab; 1060 struct symbol *sym; 1061 int begin, end; 1062 1063 begin = INT_MAX; 1064 end = INT_MIN; 1065 1066 sym = bfun->sym; 1067 if (sym == NULL) 1068 goto out; 1069 1070 symtab = sym->symtab (); 1071 1072 for (const btrace_insn &insn : bfun->insn) 1073 { 1074 struct symtab_and_line sal; 1075 1076 sal = find_pc_line (insn.pc, 0); 1077 if (sal.symtab != symtab || sal.line == 0) 1078 continue; 1079 1080 begin = std::min (begin, sal.line); 1081 end = std::max (end, sal.line); 1082 } 1083 1084 out: 1085 *pbegin = begin; 1086 *pend = end; 1087 } 1088 1089 /* Print the source line information for a function call history line. */ 1090 1091 static void 1092 btrace_call_history_src_line (struct ui_out *uiout, 1093 const struct btrace_function *bfun) 1094 { 1095 struct symbol *sym; 1096 int begin, end; 1097 1098 sym = bfun->sym; 1099 if (sym == NULL) 1100 return; 1101 1102 uiout->field_string ("file", 1103 symtab_to_filename_for_display (sym->symtab ()), 1104 file_name_style.style ()); 1105 1106 btrace_compute_src_line_range (bfun, &begin, &end); 1107 if (end < begin) 1108 return; 1109 1110 uiout->text (":"); 1111 uiout->field_signed ("min line", begin); 1112 1113 if (end == begin) 1114 return; 1115 1116 uiout->text (","); 1117 uiout->field_signed ("max line", end); 1118 } 1119 1120 /* Get the name of a branch trace function. */ 1121 1122 static const char * 1123 btrace_get_bfun_name (const struct btrace_function *bfun) 1124 { 1125 struct minimal_symbol *msym; 1126 struct symbol *sym; 1127 1128 if (bfun == NULL) 1129 return "??"; 1130 1131 msym = bfun->msym; 1132 sym = bfun->sym; 1133 1134 if (sym != NULL) 1135 return sym->print_name (); 1136 else if (msym != NULL) 1137 return msym->print_name (); 1138 else 1139 return "??"; 1140 } 1141 1142 /* Disassemble a section of the recorded function trace. */ 1143 1144 static void 1145 btrace_call_history (struct ui_out *uiout, 1146 const struct btrace_thread_info *btinfo, 1147 const struct btrace_call_iterator *begin, 1148 const struct btrace_call_iterator *end, 1149 int int_flags) 1150 { 1151 struct btrace_call_iterator it; 1152 record_print_flags flags = (enum record_print_flag) int_flags; 1153 1154 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin), 1155 btrace_call_number (end)); 1156 1157 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1)) 1158 { 1159 const struct btrace_function *bfun; 1160 struct minimal_symbol *msym; 1161 struct symbol *sym; 1162 1163 bfun = btrace_call_get (&it); 1164 sym = bfun->sym; 1165 msym = bfun->msym; 1166 1167 /* Print the function index. */ 1168 uiout->field_unsigned ("index", bfun->number); 1169 uiout->text ("\t"); 1170 1171 /* Indicate gaps in the trace. */ 1172 if (bfun->errcode != 0) 1173 { 1174 const struct btrace_config *conf; 1175 1176 conf = btrace_conf (btinfo); 1177 1178 /* We have trace so we must have a configuration. */ 1179 gdb_assert (conf != NULL); 1180 1181 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format); 1182 1183 continue; 1184 } 1185 1186 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0) 1187 { 1188 int level = bfun->level + btinfo->level, i; 1189 1190 for (i = 0; i < level; ++i) 1191 uiout->text (" "); 1192 } 1193 1194 if (sym != NULL) 1195 uiout->field_string ("function", sym->print_name (), 1196 function_name_style.style ()); 1197 else if (msym != NULL) 1198 uiout->field_string ("function", msym->print_name (), 1199 function_name_style.style ()); 1200 else if (!uiout->is_mi_like_p ()) 1201 uiout->field_string ("function", "??", 1202 function_name_style.style ()); 1203 1204 if ((flags & RECORD_PRINT_INSN_RANGE) != 0) 1205 { 1206 uiout->text (_("\tinst ")); 1207 btrace_call_history_insn_range (uiout, bfun); 1208 } 1209 1210 if ((flags & RECORD_PRINT_SRC_LINE) != 0) 1211 { 1212 uiout->text (_("\tat ")); 1213 btrace_call_history_src_line (uiout, bfun); 1214 } 1215 1216 uiout->text ("\n"); 1217 } 1218 } 1219 1220 /* The call_history method of target record-btrace. */ 1221 1222 void 1223 record_btrace_target::call_history (int size, record_print_flags flags) 1224 { 1225 struct btrace_thread_info *btinfo; 1226 struct btrace_call_history *history; 1227 struct btrace_call_iterator begin, end; 1228 struct ui_out *uiout; 1229 unsigned int context, covered; 1230 1231 uiout = current_uiout; 1232 ui_out_emit_tuple tuple_emitter (uiout, "insn history"); 1233 context = abs (size); 1234 if (context == 0) 1235 error (_("Bad record function-call-history-size.")); 1236 1237 btinfo = require_btrace (); 1238 history = btinfo->call_history; 1239 if (history == NULL) 1240 { 1241 struct btrace_insn_iterator *replay; 1242 1243 DEBUG ("call-history (0x%x): %d", (int) flags, size); 1244 1245 /* If we're replaying, we start at the replay position. Otherwise, we 1246 start at the tail of the trace. */ 1247 replay = btinfo->replay; 1248 if (replay != NULL) 1249 { 1250 begin.btinfo = btinfo; 1251 begin.index = replay->call_index; 1252 } 1253 else 1254 btrace_call_end (&begin, btinfo); 1255 1256 /* We start from here and expand in the requested direction. Then we 1257 expand in the other direction, as well, to fill up any remaining 1258 context. */ 1259 end = begin; 1260 if (size < 0) 1261 { 1262 /* We want the current position covered, as well. */ 1263 covered = btrace_call_next (&end, 1); 1264 covered += btrace_call_prev (&begin, context - covered); 1265 covered += btrace_call_next (&end, context - covered); 1266 } 1267 else 1268 { 1269 covered = btrace_call_next (&end, context); 1270 covered += btrace_call_prev (&begin, context- covered); 1271 } 1272 } 1273 else 1274 { 1275 begin = history->begin; 1276 end = history->end; 1277 1278 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size, 1279 btrace_call_number (&begin), btrace_call_number (&end)); 1280 1281 if (size < 0) 1282 { 1283 end = begin; 1284 covered = btrace_call_prev (&begin, context); 1285 } 1286 else 1287 { 1288 begin = end; 1289 covered = btrace_call_next (&end, context); 1290 } 1291 } 1292 1293 if (covered > 0) 1294 btrace_call_history (uiout, btinfo, &begin, &end, flags); 1295 else 1296 { 1297 if (size < 0) 1298 gdb_printf (_("At the start of the branch trace record.\n")); 1299 else 1300 gdb_printf (_("At the end of the branch trace record.\n")); 1301 } 1302 1303 btrace_set_call_history (btinfo, &begin, &end); 1304 } 1305 1306 /* The call_history_range method of target record-btrace. */ 1307 1308 void 1309 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to, 1310 record_print_flags flags) 1311 { 1312 struct btrace_thread_info *btinfo; 1313 struct btrace_call_iterator begin, end; 1314 struct ui_out *uiout; 1315 unsigned int low, high; 1316 int found; 1317 1318 uiout = current_uiout; 1319 ui_out_emit_tuple tuple_emitter (uiout, "func history"); 1320 low = from; 1321 high = to; 1322 1323 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high); 1324 1325 /* Check for wrap-arounds. */ 1326 if (low != from || high != to) 1327 error (_("Bad range.")); 1328 1329 if (high < low) 1330 error (_("Bad range.")); 1331 1332 btinfo = require_btrace (); 1333 1334 found = btrace_find_call_by_number (&begin, btinfo, low); 1335 if (found == 0) 1336 error (_("Range out of bounds.")); 1337 1338 found = btrace_find_call_by_number (&end, btinfo, high); 1339 if (found == 0) 1340 { 1341 /* Silently truncate the range. */ 1342 btrace_call_end (&end, btinfo); 1343 } 1344 else 1345 { 1346 /* We want both begin and end to be inclusive. */ 1347 btrace_call_next (&end, 1); 1348 } 1349 1350 btrace_call_history (uiout, btinfo, &begin, &end, flags); 1351 btrace_set_call_history (btinfo, &begin, &end); 1352 } 1353 1354 /* The call_history_from method of target record-btrace. */ 1355 1356 void 1357 record_btrace_target::call_history_from (ULONGEST from, int size, 1358 record_print_flags flags) 1359 { 1360 ULONGEST begin, end, context; 1361 1362 context = abs (size); 1363 if (context == 0) 1364 error (_("Bad record function-call-history-size.")); 1365 1366 if (size < 0) 1367 { 1368 end = from; 1369 1370 if (from < context) 1371 begin = 0; 1372 else 1373 begin = from - context + 1; 1374 } 1375 else 1376 { 1377 begin = from; 1378 end = from + context - 1; 1379 1380 /* Check for wrap-around. */ 1381 if (end < begin) 1382 end = ULONGEST_MAX; 1383 } 1384 1385 call_history_range ( begin, end, flags); 1386 } 1387 1388 /* The record_method method of target record-btrace. */ 1389 1390 enum record_method 1391 record_btrace_target::record_method (ptid_t ptid) 1392 { 1393 process_stratum_target *proc_target = current_inferior ()->process_target (); 1394 thread_info *const tp = find_thread_ptid (proc_target, ptid); 1395 1396 if (tp == NULL) 1397 error (_("No thread.")); 1398 1399 if (tp->btrace.target == NULL) 1400 return RECORD_METHOD_NONE; 1401 1402 return RECORD_METHOD_BTRACE; 1403 } 1404 1405 /* The record_is_replaying method of target record-btrace. */ 1406 1407 bool 1408 record_btrace_target::record_is_replaying (ptid_t ptid) 1409 { 1410 process_stratum_target *proc_target = current_inferior ()->process_target (); 1411 for (thread_info *tp : all_non_exited_threads (proc_target, ptid)) 1412 if (btrace_is_replaying (tp)) 1413 return true; 1414 1415 return false; 1416 } 1417 1418 /* The record_will_replay method of target record-btrace. */ 1419 1420 bool 1421 record_btrace_target::record_will_replay (ptid_t ptid, int dir) 1422 { 1423 return dir == EXEC_REVERSE || record_is_replaying (ptid); 1424 } 1425 1426 /* The xfer_partial method of target record-btrace. */ 1427 1428 enum target_xfer_status 1429 record_btrace_target::xfer_partial (enum target_object object, 1430 const char *annex, gdb_byte *readbuf, 1431 const gdb_byte *writebuf, ULONGEST offset, 1432 ULONGEST len, ULONGEST *xfered_len) 1433 { 1434 /* Filter out requests that don't make sense during replay. */ 1435 if (replay_memory_access == replay_memory_access_read_only 1436 && !record_btrace_generating_corefile 1437 && record_is_replaying (inferior_ptid)) 1438 { 1439 switch (object) 1440 { 1441 case TARGET_OBJECT_MEMORY: 1442 { 1443 const struct target_section *section; 1444 1445 /* We do not allow writing memory in general. */ 1446 if (writebuf != NULL) 1447 { 1448 *xfered_len = len; 1449 return TARGET_XFER_UNAVAILABLE; 1450 } 1451 1452 /* We allow reading readonly memory. */ 1453 section = target_section_by_addr (this, offset); 1454 if (section != NULL) 1455 { 1456 /* Check if the section we found is readonly. */ 1457 if ((bfd_section_flags (section->the_bfd_section) 1458 & SEC_READONLY) != 0) 1459 { 1460 /* Truncate the request to fit into this section. */ 1461 len = std::min (len, section->endaddr - offset); 1462 break; 1463 } 1464 } 1465 1466 *xfered_len = len; 1467 return TARGET_XFER_UNAVAILABLE; 1468 } 1469 } 1470 } 1471 1472 /* Forward the request. */ 1473 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf, 1474 offset, len, xfered_len); 1475 } 1476 1477 /* The insert_breakpoint method of target record-btrace. */ 1478 1479 int 1480 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch, 1481 struct bp_target_info *bp_tgt) 1482 { 1483 const char *old; 1484 int ret; 1485 1486 /* Inserting breakpoints requires accessing memory. Allow it for the 1487 duration of this function. */ 1488 old = replay_memory_access; 1489 replay_memory_access = replay_memory_access_read_write; 1490 1491 ret = 0; 1492 try 1493 { 1494 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt); 1495 } 1496 catch (const gdb_exception &except) 1497 { 1498 replay_memory_access = old; 1499 throw; 1500 } 1501 replay_memory_access = old; 1502 1503 return ret; 1504 } 1505 1506 /* The remove_breakpoint method of target record-btrace. */ 1507 1508 int 1509 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch, 1510 struct bp_target_info *bp_tgt, 1511 enum remove_bp_reason reason) 1512 { 1513 const char *old; 1514 int ret; 1515 1516 /* Removing breakpoints requires accessing memory. Allow it for the 1517 duration of this function. */ 1518 old = replay_memory_access; 1519 replay_memory_access = replay_memory_access_read_write; 1520 1521 ret = 0; 1522 try 1523 { 1524 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason); 1525 } 1526 catch (const gdb_exception &except) 1527 { 1528 replay_memory_access = old; 1529 throw; 1530 } 1531 replay_memory_access = old; 1532 1533 return ret; 1534 } 1535 1536 /* The fetch_registers method of target record-btrace. */ 1537 1538 void 1539 record_btrace_target::fetch_registers (struct regcache *regcache, int regno) 1540 { 1541 btrace_insn_iterator *replay = nullptr; 1542 1543 /* Thread-db may ask for a thread's registers before GDB knows about the 1544 thread. We forward the request to the target beneath in this 1545 case. */ 1546 thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ()); 1547 if (tp != nullptr) 1548 replay = tp->btrace.replay; 1549 1550 if (replay != nullptr && !record_btrace_generating_corefile) 1551 { 1552 const struct btrace_insn *insn; 1553 struct gdbarch *gdbarch; 1554 int pcreg; 1555 1556 gdbarch = regcache->arch (); 1557 pcreg = gdbarch_pc_regnum (gdbarch); 1558 if (pcreg < 0) 1559 return; 1560 1561 /* We can only provide the PC register. */ 1562 if (regno >= 0 && regno != pcreg) 1563 return; 1564 1565 insn = btrace_insn_get (replay); 1566 gdb_assert (insn != NULL); 1567 1568 regcache->raw_supply (regno, &insn->pc); 1569 } 1570 else 1571 this->beneath ()->fetch_registers (regcache, regno); 1572 } 1573 1574 /* The store_registers method of target record-btrace. */ 1575 1576 void 1577 record_btrace_target::store_registers (struct regcache *regcache, int regno) 1578 { 1579 if (!record_btrace_generating_corefile 1580 && record_is_replaying (regcache->ptid ())) 1581 error (_("Cannot write registers while replaying.")); 1582 1583 gdb_assert (may_write_registers); 1584 1585 this->beneath ()->store_registers (regcache, regno); 1586 } 1587 1588 /* The prepare_to_store method of target record-btrace. */ 1589 1590 void 1591 record_btrace_target::prepare_to_store (struct regcache *regcache) 1592 { 1593 if (!record_btrace_generating_corefile 1594 && record_is_replaying (regcache->ptid ())) 1595 return; 1596 1597 this->beneath ()->prepare_to_store (regcache); 1598 } 1599 1600 /* The branch trace frame cache. */ 1601 1602 struct btrace_frame_cache 1603 { 1604 /* The thread. */ 1605 struct thread_info *tp; 1606 1607 /* The frame info. */ 1608 frame_info *frame; 1609 1610 /* The branch trace function segment. */ 1611 const struct btrace_function *bfun; 1612 }; 1613 1614 /* A struct btrace_frame_cache hash table indexed by NEXT. */ 1615 1616 static htab_t bfcache; 1617 1618 /* hash_f for htab_create_alloc of bfcache. */ 1619 1620 static hashval_t 1621 bfcache_hash (const void *arg) 1622 { 1623 const struct btrace_frame_cache *cache 1624 = (const struct btrace_frame_cache *) arg; 1625 1626 return htab_hash_pointer (cache->frame); 1627 } 1628 1629 /* eq_f for htab_create_alloc of bfcache. */ 1630 1631 static int 1632 bfcache_eq (const void *arg1, const void *arg2) 1633 { 1634 const struct btrace_frame_cache *cache1 1635 = (const struct btrace_frame_cache *) arg1; 1636 const struct btrace_frame_cache *cache2 1637 = (const struct btrace_frame_cache *) arg2; 1638 1639 return cache1->frame == cache2->frame; 1640 } 1641 1642 /* Create a new btrace frame cache. */ 1643 1644 static struct btrace_frame_cache * 1645 bfcache_new (frame_info_ptr frame) 1646 { 1647 struct btrace_frame_cache *cache; 1648 void **slot; 1649 1650 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache); 1651 cache->frame = frame.get (); 1652 1653 slot = htab_find_slot (bfcache, cache, INSERT); 1654 gdb_assert (*slot == NULL); 1655 *slot = cache; 1656 1657 return cache; 1658 } 1659 1660 /* Extract the branch trace function from a branch trace frame. */ 1661 1662 static const struct btrace_function * 1663 btrace_get_frame_function (frame_info_ptr frame) 1664 { 1665 const struct btrace_frame_cache *cache; 1666 struct btrace_frame_cache pattern; 1667 void **slot; 1668 1669 pattern.frame = frame.get (); 1670 1671 slot = htab_find_slot (bfcache, &pattern, NO_INSERT); 1672 if (slot == NULL) 1673 return NULL; 1674 1675 cache = (const struct btrace_frame_cache *) *slot; 1676 return cache->bfun; 1677 } 1678 1679 /* Implement stop_reason method for record_btrace_frame_unwind. */ 1680 1681 static enum unwind_stop_reason 1682 record_btrace_frame_unwind_stop_reason (frame_info_ptr this_frame, 1683 void **this_cache) 1684 { 1685 const struct btrace_frame_cache *cache; 1686 const struct btrace_function *bfun; 1687 1688 cache = (const struct btrace_frame_cache *) *this_cache; 1689 bfun = cache->bfun; 1690 gdb_assert (bfun != NULL); 1691 1692 if (bfun->up == 0) 1693 return UNWIND_UNAVAILABLE; 1694 1695 return UNWIND_NO_REASON; 1696 } 1697 1698 /* Implement this_id method for record_btrace_frame_unwind. */ 1699 1700 static void 1701 record_btrace_frame_this_id (frame_info_ptr this_frame, void **this_cache, 1702 struct frame_id *this_id) 1703 { 1704 const struct btrace_frame_cache *cache; 1705 const struct btrace_function *bfun; 1706 struct btrace_call_iterator it; 1707 CORE_ADDR code, special; 1708 1709 cache = (const struct btrace_frame_cache *) *this_cache; 1710 1711 bfun = cache->bfun; 1712 gdb_assert (bfun != NULL); 1713 1714 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0) 1715 bfun = btrace_call_get (&it); 1716 1717 code = get_frame_func (this_frame); 1718 special = bfun->number; 1719 1720 *this_id = frame_id_build_unavailable_stack_special (code, special); 1721 1722 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)", 1723 btrace_get_bfun_name (cache->bfun), 1724 core_addr_to_string_nz (this_id->code_addr), 1725 core_addr_to_string_nz (this_id->special_addr)); 1726 } 1727 1728 /* Implement prev_register method for record_btrace_frame_unwind. */ 1729 1730 static struct value * 1731 record_btrace_frame_prev_register (frame_info_ptr this_frame, 1732 void **this_cache, 1733 int regnum) 1734 { 1735 const struct btrace_frame_cache *cache; 1736 const struct btrace_function *bfun, *caller; 1737 struct btrace_call_iterator it; 1738 struct gdbarch *gdbarch; 1739 CORE_ADDR pc; 1740 int pcreg; 1741 1742 gdbarch = get_frame_arch (this_frame); 1743 pcreg = gdbarch_pc_regnum (gdbarch); 1744 if (pcreg < 0 || regnum != pcreg) 1745 throw_error (NOT_AVAILABLE_ERROR, 1746 _("Registers are not available in btrace record history")); 1747 1748 cache = (const struct btrace_frame_cache *) *this_cache; 1749 bfun = cache->bfun; 1750 gdb_assert (bfun != NULL); 1751 1752 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0) 1753 throw_error (NOT_AVAILABLE_ERROR, 1754 _("No caller in btrace record history")); 1755 1756 caller = btrace_call_get (&it); 1757 1758 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0) 1759 pc = caller->insn.front ().pc; 1760 else 1761 { 1762 pc = caller->insn.back ().pc; 1763 pc += gdb_insn_length (gdbarch, pc); 1764 } 1765 1766 DEBUG ("[frame] unwound PC in %s on level %d: %s", 1767 btrace_get_bfun_name (bfun), bfun->level, 1768 core_addr_to_string_nz (pc)); 1769 1770 return frame_unwind_got_address (this_frame, regnum, pc); 1771 } 1772 1773 /* Implement sniffer method for record_btrace_frame_unwind. */ 1774 1775 static int 1776 record_btrace_frame_sniffer (const struct frame_unwind *self, 1777 frame_info_ptr this_frame, 1778 void **this_cache) 1779 { 1780 const struct btrace_function *bfun; 1781 struct btrace_frame_cache *cache; 1782 struct thread_info *tp; 1783 frame_info_ptr next; 1784 1785 /* THIS_FRAME does not contain a reference to its thread. */ 1786 tp = inferior_thread (); 1787 1788 bfun = NULL; 1789 next = get_next_frame (this_frame); 1790 if (next == NULL) 1791 { 1792 const struct btrace_insn_iterator *replay; 1793 1794 replay = tp->btrace.replay; 1795 if (replay != NULL) 1796 bfun = &replay->btinfo->functions[replay->call_index]; 1797 } 1798 else 1799 { 1800 const struct btrace_function *callee; 1801 struct btrace_call_iterator it; 1802 1803 callee = btrace_get_frame_function (next); 1804 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0) 1805 return 0; 1806 1807 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0) 1808 return 0; 1809 1810 bfun = btrace_call_get (&it); 1811 } 1812 1813 if (bfun == NULL) 1814 return 0; 1815 1816 DEBUG ("[frame] sniffed frame for %s on level %d", 1817 btrace_get_bfun_name (bfun), bfun->level); 1818 1819 /* This is our frame. Initialize the frame cache. */ 1820 cache = bfcache_new (this_frame); 1821 cache->tp = tp; 1822 cache->bfun = bfun; 1823 1824 *this_cache = cache; 1825 return 1; 1826 } 1827 1828 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */ 1829 1830 static int 1831 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self, 1832 frame_info_ptr this_frame, 1833 void **this_cache) 1834 { 1835 const struct btrace_function *bfun, *callee; 1836 struct btrace_frame_cache *cache; 1837 struct btrace_call_iterator it; 1838 frame_info_ptr next; 1839 struct thread_info *tinfo; 1840 1841 next = get_next_frame (this_frame); 1842 if (next == NULL) 1843 return 0; 1844 1845 callee = btrace_get_frame_function (next); 1846 if (callee == NULL) 1847 return 0; 1848 1849 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) 1850 return 0; 1851 1852 tinfo = inferior_thread (); 1853 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0) 1854 return 0; 1855 1856 bfun = btrace_call_get (&it); 1857 1858 DEBUG ("[frame] sniffed tailcall frame for %s on level %d", 1859 btrace_get_bfun_name (bfun), bfun->level); 1860 1861 /* This is our frame. Initialize the frame cache. */ 1862 cache = bfcache_new (this_frame); 1863 cache->tp = tinfo; 1864 cache->bfun = bfun; 1865 1866 *this_cache = cache; 1867 return 1; 1868 } 1869 1870 static void 1871 record_btrace_frame_dealloc_cache (frame_info *self, void *this_cache) 1872 { 1873 struct btrace_frame_cache *cache; 1874 void **slot; 1875 1876 cache = (struct btrace_frame_cache *) this_cache; 1877 1878 slot = htab_find_slot (bfcache, cache, NO_INSERT); 1879 gdb_assert (slot != NULL); 1880 1881 htab_remove_elt (bfcache, cache); 1882 } 1883 1884 /* btrace recording does not store previous memory content, neither the stack 1885 frames content. Any unwinding would return erroneous results as the stack 1886 contents no longer matches the changed PC value restored from history. 1887 Therefore this unwinder reports any possibly unwound registers as 1888 <unavailable>. */ 1889 1890 const struct frame_unwind record_btrace_frame_unwind = 1891 { 1892 "record-btrace", 1893 NORMAL_FRAME, 1894 record_btrace_frame_unwind_stop_reason, 1895 record_btrace_frame_this_id, 1896 record_btrace_frame_prev_register, 1897 NULL, 1898 record_btrace_frame_sniffer, 1899 record_btrace_frame_dealloc_cache 1900 }; 1901 1902 const struct frame_unwind record_btrace_tailcall_frame_unwind = 1903 { 1904 "record-btrace tailcall", 1905 TAILCALL_FRAME, 1906 record_btrace_frame_unwind_stop_reason, 1907 record_btrace_frame_this_id, 1908 record_btrace_frame_prev_register, 1909 NULL, 1910 record_btrace_tailcall_frame_sniffer, 1911 record_btrace_frame_dealloc_cache 1912 }; 1913 1914 /* Implement the get_unwinder method. */ 1915 1916 const struct frame_unwind * 1917 record_btrace_target::get_unwinder () 1918 { 1919 return &record_btrace_frame_unwind; 1920 } 1921 1922 /* Implement the get_tailcall_unwinder method. */ 1923 1924 const struct frame_unwind * 1925 record_btrace_target::get_tailcall_unwinder () 1926 { 1927 return &record_btrace_tailcall_frame_unwind; 1928 } 1929 1930 /* Return a human-readable string for FLAG. */ 1931 1932 static const char * 1933 btrace_thread_flag_to_str (btrace_thread_flags flag) 1934 { 1935 switch (flag) 1936 { 1937 case BTHR_STEP: 1938 return "step"; 1939 1940 case BTHR_RSTEP: 1941 return "reverse-step"; 1942 1943 case BTHR_CONT: 1944 return "cont"; 1945 1946 case BTHR_RCONT: 1947 return "reverse-cont"; 1948 1949 case BTHR_STOP: 1950 return "stop"; 1951 } 1952 1953 return "<invalid>"; 1954 } 1955 1956 /* Indicate that TP should be resumed according to FLAG. */ 1957 1958 static void 1959 record_btrace_resume_thread (struct thread_info *tp, 1960 enum btrace_thread_flag flag) 1961 { 1962 struct btrace_thread_info *btinfo; 1963 1964 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp), 1965 tp->ptid.to_string ().c_str (), flag, 1966 btrace_thread_flag_to_str (flag)); 1967 1968 btinfo = &tp->btrace; 1969 1970 /* Fetch the latest branch trace. */ 1971 btrace_fetch (tp, record_btrace_get_cpu ()); 1972 1973 /* A resume request overwrites a preceding resume or stop request. */ 1974 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP); 1975 btinfo->flags |= flag; 1976 } 1977 1978 /* Get the current frame for TP. */ 1979 1980 static struct frame_id 1981 get_thread_current_frame_id (struct thread_info *tp) 1982 { 1983 /* Set current thread, which is implicitly used by 1984 get_current_frame. */ 1985 scoped_restore_current_thread restore_thread; 1986 1987 switch_to_thread (tp); 1988 1989 process_stratum_target *proc_target = tp->inf->process_target (); 1990 1991 /* Clear the executing flag to allow changes to the current frame. 1992 We are not actually running, yet. We just started a reverse execution 1993 command or a record goto command. 1994 For the latter, EXECUTING is false and this has no effect. 1995 For the former, EXECUTING is true and we're in wait, about to 1996 move the thread. Since we need to recompute the stack, we temporarily 1997 set EXECUTING to false. */ 1998 bool executing = tp->executing (); 1999 set_executing (proc_target, inferior_ptid, false); 2000 SCOPE_EXIT 2001 { 2002 set_executing (proc_target, inferior_ptid, executing); 2003 }; 2004 return get_frame_id (get_current_frame ()); 2005 } 2006 2007 /* Start replaying a thread. */ 2008 2009 static struct btrace_insn_iterator * 2010 record_btrace_start_replaying (struct thread_info *tp) 2011 { 2012 struct btrace_insn_iterator *replay; 2013 struct btrace_thread_info *btinfo; 2014 2015 btinfo = &tp->btrace; 2016 replay = NULL; 2017 2018 /* We can't start replaying without trace. */ 2019 if (btinfo->functions.empty ()) 2020 error (_("No trace.")); 2021 2022 /* GDB stores the current frame_id when stepping in order to detects steps 2023 into subroutines. 2024 Since frames are computed differently when we're replaying, we need to 2025 recompute those stored frames and fix them up so we can still detect 2026 subroutines after we started replaying. */ 2027 try 2028 { 2029 struct frame_id frame_id; 2030 int upd_step_frame_id, upd_step_stack_frame_id; 2031 2032 /* The current frame without replaying - computed via normal unwind. */ 2033 frame_id = get_thread_current_frame_id (tp); 2034 2035 /* Check if we need to update any stepping-related frame id's. */ 2036 upd_step_frame_id = (frame_id == tp->control.step_frame_id); 2037 upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id); 2038 2039 /* We start replaying at the end of the branch trace. This corresponds 2040 to the current instruction. */ 2041 replay = XNEW (struct btrace_insn_iterator); 2042 btrace_insn_end (replay, btinfo); 2043 2044 /* Skip gaps at the end of the trace. */ 2045 while (btrace_insn_get (replay) == NULL) 2046 { 2047 unsigned int steps; 2048 2049 steps = btrace_insn_prev (replay, 1); 2050 if (steps == 0) 2051 error (_("No trace.")); 2052 } 2053 2054 /* We're not replaying, yet. */ 2055 gdb_assert (btinfo->replay == NULL); 2056 btinfo->replay = replay; 2057 2058 /* Make sure we're not using any stale registers. */ 2059 registers_changed_thread (tp); 2060 2061 /* The current frame with replaying - computed via btrace unwind. */ 2062 frame_id = get_thread_current_frame_id (tp); 2063 2064 /* Replace stepping related frames where necessary. */ 2065 if (upd_step_frame_id) 2066 tp->control.step_frame_id = frame_id; 2067 if (upd_step_stack_frame_id) 2068 tp->control.step_stack_frame_id = frame_id; 2069 } 2070 catch (const gdb_exception &except) 2071 { 2072 xfree (btinfo->replay); 2073 btinfo->replay = NULL; 2074 2075 registers_changed_thread (tp); 2076 2077 throw; 2078 } 2079 2080 return replay; 2081 } 2082 2083 /* Stop replaying a thread. */ 2084 2085 static void 2086 record_btrace_stop_replaying (struct thread_info *tp) 2087 { 2088 struct btrace_thread_info *btinfo; 2089 2090 btinfo = &tp->btrace; 2091 2092 xfree (btinfo->replay); 2093 btinfo->replay = NULL; 2094 2095 /* Make sure we're not leaving any stale registers. */ 2096 registers_changed_thread (tp); 2097 } 2098 2099 /* Stop replaying TP if it is at the end of its execution history. */ 2100 2101 static void 2102 record_btrace_stop_replaying_at_end (struct thread_info *tp) 2103 { 2104 struct btrace_insn_iterator *replay, end; 2105 struct btrace_thread_info *btinfo; 2106 2107 btinfo = &tp->btrace; 2108 replay = btinfo->replay; 2109 2110 if (replay == NULL) 2111 return; 2112 2113 btrace_insn_end (&end, btinfo); 2114 2115 if (btrace_insn_cmp (replay, &end) == 0) 2116 record_btrace_stop_replaying (tp); 2117 } 2118 2119 /* The resume method of target record-btrace. */ 2120 2121 void 2122 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal) 2123 { 2124 enum btrace_thread_flag flag, cflag; 2125 2126 DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (), 2127 ::execution_direction == EXEC_REVERSE ? "reverse-" : "", 2128 step ? "step" : "cont"); 2129 2130 /* Store the execution direction of the last resume. 2131 2132 If there is more than one resume call, we have to rely on infrun 2133 to not change the execution direction in-between. */ 2134 record_btrace_resume_exec_dir = ::execution_direction; 2135 2136 /* As long as we're not replaying, just forward the request. 2137 2138 For non-stop targets this means that no thread is replaying. In order to 2139 make progress, we may need to explicitly move replaying threads to the end 2140 of their execution history. */ 2141 if ((::execution_direction != EXEC_REVERSE) 2142 && !record_is_replaying (minus_one_ptid)) 2143 { 2144 this->beneath ()->resume (ptid, step, signal); 2145 return; 2146 } 2147 2148 /* Compute the btrace thread flag for the requested move. */ 2149 if (::execution_direction == EXEC_REVERSE) 2150 { 2151 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP; 2152 cflag = BTHR_RCONT; 2153 } 2154 else 2155 { 2156 flag = step == 0 ? BTHR_CONT : BTHR_STEP; 2157 cflag = BTHR_CONT; 2158 } 2159 2160 /* We just indicate the resume intent here. The actual stepping happens in 2161 record_btrace_wait below. 2162 2163 For all-stop targets, we only step INFERIOR_PTID and continue others. */ 2164 2165 process_stratum_target *proc_target = current_inferior ()->process_target (); 2166 2167 if (!target_is_non_stop_p ()) 2168 { 2169 gdb_assert (inferior_ptid.matches (ptid)); 2170 2171 for (thread_info *tp : all_non_exited_threads (proc_target, ptid)) 2172 { 2173 if (tp->ptid.matches (inferior_ptid)) 2174 record_btrace_resume_thread (tp, flag); 2175 else 2176 record_btrace_resume_thread (tp, cflag); 2177 } 2178 } 2179 else 2180 { 2181 for (thread_info *tp : all_non_exited_threads (proc_target, ptid)) 2182 record_btrace_resume_thread (tp, flag); 2183 } 2184 2185 /* Async support. */ 2186 if (target_can_async_p ()) 2187 { 2188 target_async (true); 2189 mark_async_event_handler (record_btrace_async_inferior_event_handler); 2190 } 2191 } 2192 2193 /* Cancel resuming TP. */ 2194 2195 static void 2196 record_btrace_cancel_resume (struct thread_info *tp) 2197 { 2198 btrace_thread_flags flags; 2199 2200 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP); 2201 if (flags == 0) 2202 return; 2203 2204 DEBUG ("cancel resume thread %s (%s): %x (%s)", 2205 print_thread_id (tp), 2206 tp->ptid.to_string ().c_str (), flags.raw (), 2207 btrace_thread_flag_to_str (flags)); 2208 2209 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP); 2210 record_btrace_stop_replaying_at_end (tp); 2211 } 2212 2213 /* Return a target_waitstatus indicating that we ran out of history. */ 2214 2215 static struct target_waitstatus 2216 btrace_step_no_history (void) 2217 { 2218 struct target_waitstatus status; 2219 2220 status.set_no_history (); 2221 2222 return status; 2223 } 2224 2225 /* Return a target_waitstatus indicating that a step finished. */ 2226 2227 static struct target_waitstatus 2228 btrace_step_stopped (void) 2229 { 2230 struct target_waitstatus status; 2231 2232 status.set_stopped (GDB_SIGNAL_TRAP); 2233 2234 return status; 2235 } 2236 2237 /* Return a target_waitstatus indicating that a thread was stopped as 2238 requested. */ 2239 2240 static struct target_waitstatus 2241 btrace_step_stopped_on_request (void) 2242 { 2243 struct target_waitstatus status; 2244 2245 status.set_stopped (GDB_SIGNAL_0); 2246 2247 return status; 2248 } 2249 2250 /* Return a target_waitstatus indicating a spurious stop. */ 2251 2252 static struct target_waitstatus 2253 btrace_step_spurious (void) 2254 { 2255 struct target_waitstatus status; 2256 2257 status.set_spurious (); 2258 2259 return status; 2260 } 2261 2262 /* Return a target_waitstatus indicating that the thread was not resumed. */ 2263 2264 static struct target_waitstatus 2265 btrace_step_no_resumed (void) 2266 { 2267 struct target_waitstatus status; 2268 2269 status.set_no_resumed (); 2270 2271 return status; 2272 } 2273 2274 /* Return a target_waitstatus indicating that we should wait again. */ 2275 2276 static struct target_waitstatus 2277 btrace_step_again (void) 2278 { 2279 struct target_waitstatus status; 2280 2281 status.set_ignore (); 2282 2283 return status; 2284 } 2285 2286 /* Clear the record histories. */ 2287 2288 static void 2289 record_btrace_clear_histories (struct btrace_thread_info *btinfo) 2290 { 2291 xfree (btinfo->insn_history); 2292 xfree (btinfo->call_history); 2293 2294 btinfo->insn_history = NULL; 2295 btinfo->call_history = NULL; 2296 } 2297 2298 /* Check whether TP's current replay position is at a breakpoint. */ 2299 2300 static int 2301 record_btrace_replay_at_breakpoint (struct thread_info *tp) 2302 { 2303 struct btrace_insn_iterator *replay; 2304 struct btrace_thread_info *btinfo; 2305 const struct btrace_insn *insn; 2306 2307 btinfo = &tp->btrace; 2308 replay = btinfo->replay; 2309 2310 if (replay == NULL) 2311 return 0; 2312 2313 insn = btrace_insn_get (replay); 2314 if (insn == NULL) 2315 return 0; 2316 2317 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc, 2318 &btinfo->stop_reason); 2319 } 2320 2321 /* Step one instruction in forward direction. */ 2322 2323 static struct target_waitstatus 2324 record_btrace_single_step_forward (struct thread_info *tp) 2325 { 2326 struct btrace_insn_iterator *replay, end, start; 2327 struct btrace_thread_info *btinfo; 2328 2329 btinfo = &tp->btrace; 2330 replay = btinfo->replay; 2331 2332 /* We're done if we're not replaying. */ 2333 if (replay == NULL) 2334 return btrace_step_no_history (); 2335 2336 /* Check if we're stepping a breakpoint. */ 2337 if (record_btrace_replay_at_breakpoint (tp)) 2338 return btrace_step_stopped (); 2339 2340 /* Skip gaps during replay. If we end up at a gap (at the end of the trace), 2341 jump back to the instruction at which we started. */ 2342 start = *replay; 2343 do 2344 { 2345 unsigned int steps; 2346 2347 /* We will bail out here if we continue stepping after reaching the end 2348 of the execution history. */ 2349 steps = btrace_insn_next (replay, 1); 2350 if (steps == 0) 2351 { 2352 *replay = start; 2353 return btrace_step_no_history (); 2354 } 2355 } 2356 while (btrace_insn_get (replay) == NULL); 2357 2358 /* Determine the end of the instruction trace. */ 2359 btrace_insn_end (&end, btinfo); 2360 2361 /* The execution trace contains (and ends with) the current instruction. 2362 This instruction has not been executed, yet, so the trace really ends 2363 one instruction earlier. */ 2364 if (btrace_insn_cmp (replay, &end) == 0) 2365 return btrace_step_no_history (); 2366 2367 return btrace_step_spurious (); 2368 } 2369 2370 /* Step one instruction in backward direction. */ 2371 2372 static struct target_waitstatus 2373 record_btrace_single_step_backward (struct thread_info *tp) 2374 { 2375 struct btrace_insn_iterator *replay, start; 2376 struct btrace_thread_info *btinfo; 2377 2378 btinfo = &tp->btrace; 2379 replay = btinfo->replay; 2380 2381 /* Start replaying if we're not already doing so. */ 2382 if (replay == NULL) 2383 replay = record_btrace_start_replaying (tp); 2384 2385 /* If we can't step any further, we reached the end of the history. 2386 Skip gaps during replay. If we end up at a gap (at the beginning of 2387 the trace), jump back to the instruction at which we started. */ 2388 start = *replay; 2389 do 2390 { 2391 unsigned int steps; 2392 2393 steps = btrace_insn_prev (replay, 1); 2394 if (steps == 0) 2395 { 2396 *replay = start; 2397 return btrace_step_no_history (); 2398 } 2399 } 2400 while (btrace_insn_get (replay) == NULL); 2401 2402 /* Check if we're stepping a breakpoint. 2403 2404 For reverse-stepping, this check is after the step. There is logic in 2405 infrun.c that handles reverse-stepping separately. See, for example, 2406 proceed and adjust_pc_after_break. 2407 2408 This code assumes that for reverse-stepping, PC points to the last 2409 de-executed instruction, whereas for forward-stepping PC points to the 2410 next to-be-executed instruction. */ 2411 if (record_btrace_replay_at_breakpoint (tp)) 2412 return btrace_step_stopped (); 2413 2414 return btrace_step_spurious (); 2415 } 2416 2417 /* Step a single thread. */ 2418 2419 static struct target_waitstatus 2420 record_btrace_step_thread (struct thread_info *tp) 2421 { 2422 struct btrace_thread_info *btinfo; 2423 struct target_waitstatus status; 2424 btrace_thread_flags flags; 2425 2426 btinfo = &tp->btrace; 2427 2428 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP); 2429 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP); 2430 2431 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp), 2432 tp->ptid.to_string ().c_str (), flags.raw (), 2433 btrace_thread_flag_to_str (flags)); 2434 2435 /* We can't step without an execution history. */ 2436 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp)) 2437 return btrace_step_no_history (); 2438 2439 switch (flags) 2440 { 2441 default: 2442 internal_error (_("invalid stepping type.")); 2443 2444 case BTHR_STOP: 2445 return btrace_step_stopped_on_request (); 2446 2447 case BTHR_STEP: 2448 status = record_btrace_single_step_forward (tp); 2449 if (status.kind () != TARGET_WAITKIND_SPURIOUS) 2450 break; 2451 2452 return btrace_step_stopped (); 2453 2454 case BTHR_RSTEP: 2455 status = record_btrace_single_step_backward (tp); 2456 if (status.kind () != TARGET_WAITKIND_SPURIOUS) 2457 break; 2458 2459 return btrace_step_stopped (); 2460 2461 case BTHR_CONT: 2462 status = record_btrace_single_step_forward (tp); 2463 if (status.kind () != TARGET_WAITKIND_SPURIOUS) 2464 break; 2465 2466 btinfo->flags |= flags; 2467 return btrace_step_again (); 2468 2469 case BTHR_RCONT: 2470 status = record_btrace_single_step_backward (tp); 2471 if (status.kind () != TARGET_WAITKIND_SPURIOUS) 2472 break; 2473 2474 btinfo->flags |= flags; 2475 return btrace_step_again (); 2476 } 2477 2478 /* We keep threads moving at the end of their execution history. The wait 2479 method will stop the thread for whom the event is reported. */ 2480 if (status.kind () == TARGET_WAITKIND_NO_HISTORY) 2481 btinfo->flags |= flags; 2482 2483 return status; 2484 } 2485 2486 /* Announce further events if necessary. */ 2487 2488 static void 2489 record_btrace_maybe_mark_async_event 2490 (const std::vector<thread_info *> &moving, 2491 const std::vector<thread_info *> &no_history) 2492 { 2493 bool more_moving = !moving.empty (); 2494 bool more_no_history = !no_history.empty ();; 2495 2496 if (!more_moving && !more_no_history) 2497 return; 2498 2499 if (more_moving) 2500 DEBUG ("movers pending"); 2501 2502 if (more_no_history) 2503 DEBUG ("no-history pending"); 2504 2505 mark_async_event_handler (record_btrace_async_inferior_event_handler); 2506 } 2507 2508 /* The wait method of target record-btrace. */ 2509 2510 ptid_t 2511 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status, 2512 target_wait_flags options) 2513 { 2514 std::vector<thread_info *> moving; 2515 std::vector<thread_info *> no_history; 2516 2517 /* Clear this, if needed we'll re-mark it below. */ 2518 clear_async_event_handler (record_btrace_async_inferior_event_handler); 2519 2520 DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (), 2521 (unsigned) options); 2522 2523 /* As long as we're not replaying, just forward the request. */ 2524 if ((::execution_direction != EXEC_REVERSE) 2525 && !record_is_replaying (minus_one_ptid)) 2526 { 2527 return this->beneath ()->wait (ptid, status, options); 2528 } 2529 2530 /* Keep a work list of moving threads. */ 2531 process_stratum_target *proc_target = current_inferior ()->process_target (); 2532 for (thread_info *tp : all_non_exited_threads (proc_target, ptid)) 2533 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0) 2534 moving.push_back (tp); 2535 2536 if (moving.empty ()) 2537 { 2538 *status = btrace_step_no_resumed (); 2539 2540 DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (), 2541 status->to_string ().c_str ()); 2542 2543 return null_ptid; 2544 } 2545 2546 /* Step moving threads one by one, one step each, until either one thread 2547 reports an event or we run out of threads to step. 2548 2549 When stepping more than one thread, chances are that some threads reach 2550 the end of their execution history earlier than others. If we reported 2551 this immediately, all-stop on top of non-stop would stop all threads and 2552 resume the same threads next time. And we would report the same thread 2553 having reached the end of its execution history again. 2554 2555 In the worst case, this would starve the other threads. But even if other 2556 threads would be allowed to make progress, this would result in far too 2557 many intermediate stops. 2558 2559 We therefore delay the reporting of "no execution history" until we have 2560 nothing else to report. By this time, all threads should have moved to 2561 either the beginning or the end of their execution history. There will 2562 be a single user-visible stop. */ 2563 struct thread_info *eventing = NULL; 2564 while ((eventing == NULL) && !moving.empty ()) 2565 { 2566 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();) 2567 { 2568 thread_info *tp = moving[ix]; 2569 2570 *status = record_btrace_step_thread (tp); 2571 2572 switch (status->kind ()) 2573 { 2574 case TARGET_WAITKIND_IGNORE: 2575 ix++; 2576 break; 2577 2578 case TARGET_WAITKIND_NO_HISTORY: 2579 no_history.push_back (ordered_remove (moving, ix)); 2580 break; 2581 2582 default: 2583 eventing = unordered_remove (moving, ix); 2584 break; 2585 } 2586 } 2587 } 2588 2589 if (eventing == NULL) 2590 { 2591 /* We started with at least one moving thread. This thread must have 2592 either stopped or reached the end of its execution history. 2593 2594 In the former case, EVENTING must not be NULL. 2595 In the latter case, NO_HISTORY must not be empty. */ 2596 gdb_assert (!no_history.empty ()); 2597 2598 /* We kept threads moving at the end of their execution history. Stop 2599 EVENTING now that we are going to report its stop. */ 2600 eventing = unordered_remove (no_history, 0); 2601 eventing->btrace.flags &= ~BTHR_MOVE; 2602 2603 *status = btrace_step_no_history (); 2604 } 2605 2606 gdb_assert (eventing != NULL); 2607 2608 /* We kept threads replaying at the end of their execution history. Stop 2609 replaying EVENTING now that we are going to report its stop. */ 2610 record_btrace_stop_replaying_at_end (eventing); 2611 2612 /* Stop all other threads. */ 2613 if (!target_is_non_stop_p ()) 2614 { 2615 for (thread_info *tp : current_inferior ()->non_exited_threads ()) 2616 record_btrace_cancel_resume (tp); 2617 } 2618 2619 /* In async mode, we need to announce further events. */ 2620 if (target_is_async_p ()) 2621 record_btrace_maybe_mark_async_event (moving, no_history); 2622 2623 /* Start record histories anew from the current position. */ 2624 record_btrace_clear_histories (&eventing->btrace); 2625 2626 /* We moved the replay position but did not update registers. */ 2627 registers_changed_thread (eventing); 2628 2629 DEBUG ("wait ended by thread %s (%s): %s", 2630 print_thread_id (eventing), 2631 eventing->ptid.to_string ().c_str (), 2632 status->to_string ().c_str ()); 2633 2634 return eventing->ptid; 2635 } 2636 2637 /* The stop method of target record-btrace. */ 2638 2639 void 2640 record_btrace_target::stop (ptid_t ptid) 2641 { 2642 DEBUG ("stop %s", ptid.to_string ().c_str ()); 2643 2644 /* As long as we're not replaying, just forward the request. */ 2645 if ((::execution_direction != EXEC_REVERSE) 2646 && !record_is_replaying (minus_one_ptid)) 2647 { 2648 this->beneath ()->stop (ptid); 2649 } 2650 else 2651 { 2652 process_stratum_target *proc_target 2653 = current_inferior ()->process_target (); 2654 2655 for (thread_info *tp : all_non_exited_threads (proc_target, ptid)) 2656 { 2657 tp->btrace.flags &= ~BTHR_MOVE; 2658 tp->btrace.flags |= BTHR_STOP; 2659 } 2660 } 2661 } 2662 2663 /* The can_execute_reverse method of target record-btrace. */ 2664 2665 bool 2666 record_btrace_target::can_execute_reverse () 2667 { 2668 return true; 2669 } 2670 2671 /* The stopped_by_sw_breakpoint method of target record-btrace. */ 2672 2673 bool 2674 record_btrace_target::stopped_by_sw_breakpoint () 2675 { 2676 if (record_is_replaying (minus_one_ptid)) 2677 { 2678 struct thread_info *tp = inferior_thread (); 2679 2680 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT; 2681 } 2682 2683 return this->beneath ()->stopped_by_sw_breakpoint (); 2684 } 2685 2686 /* The supports_stopped_by_sw_breakpoint method of target 2687 record-btrace. */ 2688 2689 bool 2690 record_btrace_target::supports_stopped_by_sw_breakpoint () 2691 { 2692 if (record_is_replaying (minus_one_ptid)) 2693 return true; 2694 2695 return this->beneath ()->supports_stopped_by_sw_breakpoint (); 2696 } 2697 2698 /* The stopped_by_sw_breakpoint method of target record-btrace. */ 2699 2700 bool 2701 record_btrace_target::stopped_by_hw_breakpoint () 2702 { 2703 if (record_is_replaying (minus_one_ptid)) 2704 { 2705 struct thread_info *tp = inferior_thread (); 2706 2707 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT; 2708 } 2709 2710 return this->beneath ()->stopped_by_hw_breakpoint (); 2711 } 2712 2713 /* The supports_stopped_by_hw_breakpoint method of target 2714 record-btrace. */ 2715 2716 bool 2717 record_btrace_target::supports_stopped_by_hw_breakpoint () 2718 { 2719 if (record_is_replaying (minus_one_ptid)) 2720 return true; 2721 2722 return this->beneath ()->supports_stopped_by_hw_breakpoint (); 2723 } 2724 2725 /* The update_thread_list method of target record-btrace. */ 2726 2727 void 2728 record_btrace_target::update_thread_list () 2729 { 2730 /* We don't add or remove threads during replay. */ 2731 if (record_is_replaying (minus_one_ptid)) 2732 return; 2733 2734 /* Forward the request. */ 2735 this->beneath ()->update_thread_list (); 2736 } 2737 2738 /* The thread_alive method of target record-btrace. */ 2739 2740 bool 2741 record_btrace_target::thread_alive (ptid_t ptid) 2742 { 2743 /* We don't add or remove threads during replay. */ 2744 if (record_is_replaying (minus_one_ptid)) 2745 return true; 2746 2747 /* Forward the request. */ 2748 return this->beneath ()->thread_alive (ptid); 2749 } 2750 2751 /* Set the replay branch trace instruction iterator. If IT is NULL, replay 2752 is stopped. */ 2753 2754 static void 2755 record_btrace_set_replay (struct thread_info *tp, 2756 const struct btrace_insn_iterator *it) 2757 { 2758 struct btrace_thread_info *btinfo; 2759 2760 btinfo = &tp->btrace; 2761 2762 if (it == NULL) 2763 record_btrace_stop_replaying (tp); 2764 else 2765 { 2766 if (btinfo->replay == NULL) 2767 record_btrace_start_replaying (tp); 2768 else if (btrace_insn_cmp (btinfo->replay, it) == 0) 2769 return; 2770 2771 *btinfo->replay = *it; 2772 registers_changed_thread (tp); 2773 } 2774 2775 /* Start anew from the new replay position. */ 2776 record_btrace_clear_histories (btinfo); 2777 2778 inferior_thread ()->set_stop_pc (regcache_read_pc (get_current_regcache ())); 2779 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1); 2780 } 2781 2782 /* The goto_record_begin method of target record-btrace. */ 2783 2784 void 2785 record_btrace_target::goto_record_begin () 2786 { 2787 struct thread_info *tp; 2788 struct btrace_insn_iterator begin; 2789 2790 tp = require_btrace_thread (); 2791 2792 btrace_insn_begin (&begin, &tp->btrace); 2793 2794 /* Skip gaps at the beginning of the trace. */ 2795 while (btrace_insn_get (&begin) == NULL) 2796 { 2797 unsigned int steps; 2798 2799 steps = btrace_insn_next (&begin, 1); 2800 if (steps == 0) 2801 error (_("No trace.")); 2802 } 2803 2804 record_btrace_set_replay (tp, &begin); 2805 } 2806 2807 /* The goto_record_end method of target record-btrace. */ 2808 2809 void 2810 record_btrace_target::goto_record_end () 2811 { 2812 struct thread_info *tp; 2813 2814 tp = require_btrace_thread (); 2815 2816 record_btrace_set_replay (tp, NULL); 2817 } 2818 2819 /* The goto_record method of target record-btrace. */ 2820 2821 void 2822 record_btrace_target::goto_record (ULONGEST insn) 2823 { 2824 struct thread_info *tp; 2825 struct btrace_insn_iterator it; 2826 unsigned int number; 2827 int found; 2828 2829 number = insn; 2830 2831 /* Check for wrap-arounds. */ 2832 if (number != insn) 2833 error (_("Instruction number out of range.")); 2834 2835 tp = require_btrace_thread (); 2836 2837 found = btrace_find_insn_by_number (&it, &tp->btrace, number); 2838 2839 /* Check if the instruction could not be found or is a gap. */ 2840 if (found == 0 || btrace_insn_get (&it) == NULL) 2841 error (_("No such instruction.")); 2842 2843 record_btrace_set_replay (tp, &it); 2844 } 2845 2846 /* The record_stop_replaying method of target record-btrace. */ 2847 2848 void 2849 record_btrace_target::record_stop_replaying () 2850 { 2851 for (thread_info *tp : current_inferior ()->non_exited_threads ()) 2852 record_btrace_stop_replaying (tp); 2853 } 2854 2855 /* The execution_direction target method. */ 2856 2857 enum exec_direction_kind 2858 record_btrace_target::execution_direction () 2859 { 2860 return record_btrace_resume_exec_dir; 2861 } 2862 2863 /* The prepare_to_generate_core target method. */ 2864 2865 void 2866 record_btrace_target::prepare_to_generate_core () 2867 { 2868 record_btrace_generating_corefile = 1; 2869 } 2870 2871 /* The done_generating_core target method. */ 2872 2873 void 2874 record_btrace_target::done_generating_core () 2875 { 2876 record_btrace_generating_corefile = 0; 2877 } 2878 2879 /* Start recording in BTS format. */ 2880 2881 static void 2882 cmd_record_btrace_bts_start (const char *args, int from_tty) 2883 { 2884 if (args != NULL && *args != 0) 2885 error (_("Invalid argument.")); 2886 2887 record_btrace_conf.format = BTRACE_FORMAT_BTS; 2888 2889 try 2890 { 2891 execute_command ("target record-btrace", from_tty); 2892 } 2893 catch (const gdb_exception &exception) 2894 { 2895 record_btrace_conf.format = BTRACE_FORMAT_NONE; 2896 throw; 2897 } 2898 } 2899 2900 /* Start recording in Intel Processor Trace format. */ 2901 2902 static void 2903 cmd_record_btrace_pt_start (const char *args, int from_tty) 2904 { 2905 if (args != NULL && *args != 0) 2906 error (_("Invalid argument.")); 2907 2908 record_btrace_conf.format = BTRACE_FORMAT_PT; 2909 2910 try 2911 { 2912 execute_command ("target record-btrace", from_tty); 2913 } 2914 catch (const gdb_exception &exception) 2915 { 2916 record_btrace_conf.format = BTRACE_FORMAT_NONE; 2917 throw; 2918 } 2919 } 2920 2921 /* Alias for "target record". */ 2922 2923 static void 2924 cmd_record_btrace_start (const char *args, int from_tty) 2925 { 2926 if (args != NULL && *args != 0) 2927 error (_("Invalid argument.")); 2928 2929 record_btrace_conf.format = BTRACE_FORMAT_PT; 2930 2931 try 2932 { 2933 execute_command ("target record-btrace", from_tty); 2934 } 2935 catch (const gdb_exception &exception) 2936 { 2937 record_btrace_conf.format = BTRACE_FORMAT_BTS; 2938 2939 try 2940 { 2941 execute_command ("target record-btrace", from_tty); 2942 } 2943 catch (const gdb_exception &ex) 2944 { 2945 record_btrace_conf.format = BTRACE_FORMAT_NONE; 2946 throw; 2947 } 2948 } 2949 } 2950 2951 /* The "show record btrace replay-memory-access" command. */ 2952 2953 static void 2954 cmd_show_replay_memory_access (struct ui_file *file, int from_tty, 2955 struct cmd_list_element *c, const char *value) 2956 { 2957 gdb_printf (file, _("Replay memory access is %s.\n"), 2958 replay_memory_access); 2959 } 2960 2961 /* The "set record btrace cpu none" command. */ 2962 2963 static void 2964 cmd_set_record_btrace_cpu_none (const char *args, int from_tty) 2965 { 2966 if (args != nullptr && *args != 0) 2967 error (_("Trailing junk: '%s'."), args); 2968 2969 record_btrace_cpu_state = CS_NONE; 2970 } 2971 2972 /* The "set record btrace cpu auto" command. */ 2973 2974 static void 2975 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty) 2976 { 2977 if (args != nullptr && *args != 0) 2978 error (_("Trailing junk: '%s'."), args); 2979 2980 record_btrace_cpu_state = CS_AUTO; 2981 } 2982 2983 /* The "set record btrace cpu" command. */ 2984 2985 static void 2986 cmd_set_record_btrace_cpu (const char *args, int from_tty) 2987 { 2988 if (args == nullptr) 2989 args = ""; 2990 2991 /* We use a hard-coded vendor string for now. */ 2992 unsigned int family, model, stepping; 2993 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family, 2994 &model, &l1, &stepping, &l2); 2995 if (matches == 3) 2996 { 2997 if (strlen (args) != l2) 2998 error (_("Trailing junk: '%s'."), args + l2); 2999 } 3000 else if (matches == 2) 3001 { 3002 if (strlen (args) != l1) 3003 error (_("Trailing junk: '%s'."), args + l1); 3004 3005 stepping = 0; 3006 } 3007 else 3008 error (_("Bad format. See \"help set record btrace cpu\".")); 3009 3010 if (USHRT_MAX < family) 3011 error (_("Cpu family too big.")); 3012 3013 if (UCHAR_MAX < model) 3014 error (_("Cpu model too big.")); 3015 3016 if (UCHAR_MAX < stepping) 3017 error (_("Cpu stepping too big.")); 3018 3019 record_btrace_cpu.vendor = CV_INTEL; 3020 record_btrace_cpu.family = family; 3021 record_btrace_cpu.model = model; 3022 record_btrace_cpu.stepping = stepping; 3023 3024 record_btrace_cpu_state = CS_CPU; 3025 } 3026 3027 /* The "show record btrace cpu" command. */ 3028 3029 static void 3030 cmd_show_record_btrace_cpu (const char *args, int from_tty) 3031 { 3032 if (args != nullptr && *args != 0) 3033 error (_("Trailing junk: '%s'."), args); 3034 3035 switch (record_btrace_cpu_state) 3036 { 3037 case CS_AUTO: 3038 gdb_printf (_("btrace cpu is 'auto'.\n")); 3039 return; 3040 3041 case CS_NONE: 3042 gdb_printf (_("btrace cpu is 'none'.\n")); 3043 return; 3044 3045 case CS_CPU: 3046 switch (record_btrace_cpu.vendor) 3047 { 3048 case CV_INTEL: 3049 if (record_btrace_cpu.stepping == 0) 3050 gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"), 3051 record_btrace_cpu.family, 3052 record_btrace_cpu.model); 3053 else 3054 gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"), 3055 record_btrace_cpu.family, 3056 record_btrace_cpu.model, 3057 record_btrace_cpu.stepping); 3058 return; 3059 } 3060 } 3061 3062 error (_("Internal error: bad cpu state.")); 3063 } 3064 3065 /* The "record bts buffer-size" show value function. */ 3066 3067 static void 3068 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty, 3069 struct cmd_list_element *c, 3070 const char *value) 3071 { 3072 gdb_printf (file, _("The record/replay bts buffer size is %s.\n"), 3073 value); 3074 } 3075 3076 /* The "record pt buffer-size" show value function. */ 3077 3078 static void 3079 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty, 3080 struct cmd_list_element *c, 3081 const char *value) 3082 { 3083 gdb_printf (file, _("The record/replay pt buffer size is %s.\n"), 3084 value); 3085 } 3086 3087 /* Initialize btrace commands. */ 3088 3089 void _initialize_record_btrace (); 3090 void 3091 _initialize_record_btrace () 3092 { 3093 cmd_list_element *record_btrace_cmd 3094 = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start, 3095 _("Start branch trace recording."), 3096 &record_btrace_cmdlist, 0, &record_cmdlist); 3097 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist); 3098 3099 cmd_list_element *record_btrace_bts_cmd 3100 = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start, 3101 _("\ 3102 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\ 3103 The processor stores a from/to record for each branch into a cyclic buffer.\n\ 3104 This format may not be available on all processors."), 3105 &record_btrace_cmdlist); 3106 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1, 3107 &record_cmdlist); 3108 3109 cmd_list_element *record_btrace_pt_cmd 3110 = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start, 3111 _("\ 3112 Start branch trace recording in Intel Processor Trace format.\n\n\ 3113 This format may not be available on all processors."), 3114 &record_btrace_cmdlist); 3115 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist); 3116 3117 add_setshow_prefix_cmd ("btrace", class_support, 3118 _("Set record options."), 3119 _("Show record options."), 3120 &set_record_btrace_cmdlist, 3121 &show_record_btrace_cmdlist, 3122 &set_record_cmdlist, &show_record_cmdlist); 3123 3124 add_setshow_enum_cmd ("replay-memory-access", no_class, 3125 replay_memory_access_types, &replay_memory_access, _("\ 3126 Set what memory accesses are allowed during replay."), _("\ 3127 Show what memory accesses are allowed during replay."), 3128 _("Default is READ-ONLY.\n\n\ 3129 The btrace record target does not trace data.\n\ 3130 The memory therefore corresponds to the live target and not \ 3131 to the current replay position.\n\n\ 3132 When READ-ONLY, allow accesses to read-only memory during replay.\n\ 3133 When READ-WRITE, allow accesses to read-only and read-write memory during \ 3134 replay."), 3135 NULL, cmd_show_replay_memory_access, 3136 &set_record_btrace_cmdlist, 3137 &show_record_btrace_cmdlist); 3138 3139 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu, 3140 _("\ 3141 Set the cpu to be used for trace decode.\n\n\ 3142 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\ 3143 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\ 3144 When decoding branch trace, enable errata workarounds for the specified cpu.\n\ 3145 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\ 3146 When GDB does not support that cpu, this option can be used to enable\n\ 3147 workarounds for a similar cpu that GDB supports.\n\n\ 3148 When set to \"none\", errata workarounds are disabled."), 3149 &set_record_btrace_cpu_cmdlist, 3150 1, 3151 &set_record_btrace_cmdlist); 3152 3153 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\ 3154 Automatically determine the cpu to be used for trace decode."), 3155 &set_record_btrace_cpu_cmdlist); 3156 3157 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\ 3158 Do not enable errata workarounds for trace decode."), 3159 &set_record_btrace_cpu_cmdlist); 3160 3161 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\ 3162 Show the cpu to be used for trace decode."), 3163 &show_record_btrace_cmdlist); 3164 3165 add_setshow_prefix_cmd ("bts", class_support, 3166 _("Set record btrace bts options."), 3167 _("Show record btrace bts options."), 3168 &set_record_btrace_bts_cmdlist, 3169 &show_record_btrace_bts_cmdlist, 3170 &set_record_btrace_cmdlist, 3171 &show_record_btrace_cmdlist); 3172 3173 add_setshow_uinteger_cmd ("buffer-size", no_class, 3174 &record_btrace_conf.bts.size, 3175 _("Set the record/replay bts buffer size."), 3176 _("Show the record/replay bts buffer size."), _("\ 3177 When starting recording request a trace buffer of this size. \ 3178 The actual buffer size may differ from the requested size. \ 3179 Use \"info record\" to see the actual buffer size.\n\n\ 3180 Bigger buffers allow longer recording but also take more time to process \ 3181 the recorded execution trace.\n\n\ 3182 The trace buffer size may not be changed while recording."), NULL, 3183 show_record_bts_buffer_size_value, 3184 &set_record_btrace_bts_cmdlist, 3185 &show_record_btrace_bts_cmdlist); 3186 3187 add_setshow_prefix_cmd ("pt", class_support, 3188 _("Set record btrace pt options."), 3189 _("Show record btrace pt options."), 3190 &set_record_btrace_pt_cmdlist, 3191 &show_record_btrace_pt_cmdlist, 3192 &set_record_btrace_cmdlist, 3193 &show_record_btrace_cmdlist); 3194 3195 add_setshow_uinteger_cmd ("buffer-size", no_class, 3196 &record_btrace_conf.pt.size, 3197 _("Set the record/replay pt buffer size."), 3198 _("Show the record/replay pt buffer size."), _("\ 3199 Bigger buffers allow longer recording but also take more time to process \ 3200 the recorded execution.\n\ 3201 The actual buffer size may differ from the requested size. Use \"info record\" \ 3202 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value, 3203 &set_record_btrace_pt_cmdlist, 3204 &show_record_btrace_pt_cmdlist); 3205 3206 add_target (record_btrace_target_info, record_btrace_target_open); 3207 3208 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL, 3209 xcalloc, xfree); 3210 3211 record_btrace_conf.bts.size = 64 * 1024; 3212 record_btrace_conf.pt.size = 16 * 1024; 3213 } 3214