1 /* Select target systems and architectures at runtime for GDB. 2 3 Copyright (C) 1990-2017 Free Software Foundation, Inc. 4 5 Contributed by Cygnus Support. 6 7 This file is part of GDB. 8 9 This program is free software; you can redistribute it and/or modify 10 it under the terms of the GNU General Public License as published by 11 the Free Software Foundation; either version 3 of the License, or 12 (at your option) any later version. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 21 22 #include "defs.h" 23 #include "target.h" 24 #include "target-dcache.h" 25 #include "gdbcmd.h" 26 #include "symtab.h" 27 #include "inferior.h" 28 #include "infrun.h" 29 #include "bfd.h" 30 #include "symfile.h" 31 #include "objfiles.h" 32 #include "dcache.h" 33 #include <signal.h> 34 #include "regcache.h" 35 #include "gdbcore.h" 36 #include "target-descriptions.h" 37 #include "gdbthread.h" 38 #include "solib.h" 39 #include "exec.h" 40 #include "inline-frame.h" 41 #include "tracepoint.h" 42 #include "gdb/fileio.h" 43 #include "agent.h" 44 #include "auxv.h" 45 #include "target-debug.h" 46 #include "top.h" 47 #include "event-top.h" 48 #include <algorithm> 49 50 static void target_info (char *, int); 51 52 static void generic_tls_error (void) ATTRIBUTE_NORETURN; 53 54 static void default_terminal_info (struct target_ops *, const char *, int); 55 56 static int default_watchpoint_addr_within_range (struct target_ops *, 57 CORE_ADDR, CORE_ADDR, int); 58 59 static int default_region_ok_for_hw_watchpoint (struct target_ops *, 60 CORE_ADDR, int); 61 62 static void default_rcmd (struct target_ops *, const char *, struct ui_file *); 63 64 static ptid_t 
default_get_ada_task_ptid (struct target_ops *self, 65 long lwp, long tid); 66 67 static int default_follow_fork (struct target_ops *self, int follow_child, 68 int detach_fork); 69 70 static void default_mourn_inferior (struct target_ops *self); 71 72 static int default_search_memory (struct target_ops *ops, 73 CORE_ADDR start_addr, 74 ULONGEST search_space_len, 75 const gdb_byte *pattern, 76 ULONGEST pattern_len, 77 CORE_ADDR *found_addrp); 78 79 static int default_verify_memory (struct target_ops *self, 80 const gdb_byte *data, 81 CORE_ADDR memaddr, ULONGEST size); 82 83 static struct address_space *default_thread_address_space 84 (struct target_ops *self, ptid_t ptid); 85 86 static void tcomplain (void) ATTRIBUTE_NORETURN; 87 88 static int return_zero (struct target_ops *); 89 90 static int return_zero_has_execution (struct target_ops *, ptid_t); 91 92 static void target_command (char *, int); 93 94 static struct target_ops *find_default_run_target (const char *); 95 96 static struct gdbarch *default_thread_architecture (struct target_ops *ops, 97 ptid_t ptid); 98 99 static int dummy_find_memory_regions (struct target_ops *self, 100 find_memory_region_ftype ignore1, 101 void *ignore2); 102 103 static char *dummy_make_corefile_notes (struct target_ops *self, 104 bfd *ignore1, int *ignore2); 105 106 static const char *default_pid_to_str (struct target_ops *ops, ptid_t ptid); 107 108 static enum exec_direction_kind default_execution_direction 109 (struct target_ops *self); 110 111 static struct target_ops debug_target; 112 113 #include "target-delegates.c" 114 115 static void init_dummy_target (void); 116 117 static void update_current_target (void); 118 119 /* Vector of existing target structures. */ 120 typedef struct target_ops *target_ops_p; 121 DEF_VEC_P (target_ops_p); 122 static VEC (target_ops_p) *target_structs; 123 124 /* The initial current target, so that there is always a semi-valid 125 current target. 
   */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;

/* "set debug target" callback.  Rebuild the squashed current_target
   so that the debug wrappers are installed or removed to match the
   new setting.  */

static void
set_targetdebug (char *args, int from_tty, struct cmd_list_element *c)
{
  update_current_target ();
}

/* "show debug target" callback.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}

static void setup_target_debug (void);

/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
                  gdb_stdout);
}

/* Default target_has_* methods for process_stratum targets.
*/ 197 198 int 199 default_child_has_all_memory (struct target_ops *ops) 200 { 201 /* If no inferior selected, then we can't read memory here. */ 202 if (ptid_equal (inferior_ptid, null_ptid)) 203 return 0; 204 205 return 1; 206 } 207 208 int 209 default_child_has_memory (struct target_ops *ops) 210 { 211 /* If no inferior selected, then we can't read memory here. */ 212 if (ptid_equal (inferior_ptid, null_ptid)) 213 return 0; 214 215 return 1; 216 } 217 218 int 219 default_child_has_stack (struct target_ops *ops) 220 { 221 /* If no inferior selected, there's no stack. */ 222 if (ptid_equal (inferior_ptid, null_ptid)) 223 return 0; 224 225 return 1; 226 } 227 228 int 229 default_child_has_registers (struct target_ops *ops) 230 { 231 /* Can't read registers from no inferior. */ 232 if (ptid_equal (inferior_ptid, null_ptid)) 233 return 0; 234 235 return 1; 236 } 237 238 int 239 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid) 240 { 241 /* If there's no thread selected, then we can't make it run through 242 hoops. 
*/ 243 if (ptid_equal (the_ptid, null_ptid)) 244 return 0; 245 246 return 1; 247 } 248 249 250 int 251 target_has_all_memory_1 (void) 252 { 253 struct target_ops *t; 254 255 for (t = current_target.beneath; t != NULL; t = t->beneath) 256 if (t->to_has_all_memory (t)) 257 return 1; 258 259 return 0; 260 } 261 262 int 263 target_has_memory_1 (void) 264 { 265 struct target_ops *t; 266 267 for (t = current_target.beneath; t != NULL; t = t->beneath) 268 if (t->to_has_memory (t)) 269 return 1; 270 271 return 0; 272 } 273 274 int 275 target_has_stack_1 (void) 276 { 277 struct target_ops *t; 278 279 for (t = current_target.beneath; t != NULL; t = t->beneath) 280 if (t->to_has_stack (t)) 281 return 1; 282 283 return 0; 284 } 285 286 int 287 target_has_registers_1 (void) 288 { 289 struct target_ops *t; 290 291 for (t = current_target.beneath; t != NULL; t = t->beneath) 292 if (t->to_has_registers (t)) 293 return 1; 294 295 return 0; 296 } 297 298 int 299 target_has_execution_1 (ptid_t the_ptid) 300 { 301 struct target_ops *t; 302 303 for (t = current_target.beneath; t != NULL; t = t->beneath) 304 if (t->to_has_execution (t, the_ptid)) 305 return 1; 306 307 return 0; 308 } 309 310 int 311 target_has_execution_current (void) 312 { 313 return target_has_execution_1 (inferior_ptid); 314 } 315 316 /* Complete initialization of T. This ensures that various fields in 317 T are set, if needed by the target implementation. */ 318 319 void 320 complete_target_initialization (struct target_ops *t) 321 { 322 /* Provide default values for all "must have" methods. 
*/ 323 324 if (t->to_has_all_memory == NULL) 325 t->to_has_all_memory = return_zero; 326 327 if (t->to_has_memory == NULL) 328 t->to_has_memory = return_zero; 329 330 if (t->to_has_stack == NULL) 331 t->to_has_stack = return_zero; 332 333 if (t->to_has_registers == NULL) 334 t->to_has_registers = return_zero; 335 336 if (t->to_has_execution == NULL) 337 t->to_has_execution = return_zero_has_execution; 338 339 /* These methods can be called on an unpushed target and so require 340 a default implementation if the target might plausibly be the 341 default run target. */ 342 gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL 343 && t->to_supports_non_stop != NULL)); 344 345 install_delegators (t); 346 } 347 348 /* This is used to implement the various target commands. */ 349 350 static void 351 open_target (char *args, int from_tty, struct cmd_list_element *command) 352 { 353 struct target_ops *ops = (struct target_ops *) get_cmd_context (command); 354 355 if (targetdebug) 356 fprintf_unfiltered (gdb_stdlog, "-> %s->to_open (...)\n", 357 ops->to_shortname); 358 359 ops->to_open (args, from_tty); 360 361 if (targetdebug) 362 fprintf_unfiltered (gdb_stdlog, "<- %s->to_open (%s, %d)\n", 363 ops->to_shortname, args, from_tty); 364 } 365 366 /* Add possible target architecture T to the list and add a new 367 command 'target T->to_shortname'. Set COMPLETER as the command's 368 completer if not NULL. */ 369 370 void 371 add_target_with_completer (struct target_ops *t, 372 completer_ftype *completer) 373 { 374 struct cmd_list_element *c; 375 376 complete_target_initialization (t); 377 378 VEC_safe_push (target_ops_p, target_structs, t); 379 380 if (targetlist == NULL) 381 add_prefix_cmd ("target", class_run, target_command, _("\ 382 Connect to a target machine or process.\n\ 383 The first argument is the type or protocol of the target machine.\n\ 384 Remaining arguments are interpreted by the target protocol. 
For more\n\ 385 information on the arguments for a particular protocol, type\n\ 386 `help target ' followed by the protocol name."), 387 &targetlist, "target ", 0, &cmdlist); 388 c = add_cmd (t->to_shortname, no_class, NULL, t->to_doc, &targetlist); 389 set_cmd_sfunc (c, open_target); 390 set_cmd_context (c, t); 391 if (completer != NULL) 392 set_cmd_completer (c, completer); 393 } 394 395 /* Add a possible target architecture to the list. */ 396 397 void 398 add_target (struct target_ops *t) 399 { 400 add_target_with_completer (t, NULL); 401 } 402 403 /* See target.h. */ 404 405 void 406 add_deprecated_target_alias (struct target_ops *t, const char *alias) 407 { 408 struct cmd_list_element *c; 409 char *alt; 410 411 /* If we use add_alias_cmd, here, we do not get the deprecated warning, 412 see PR cli/15104. */ 413 c = add_cmd (alias, no_class, NULL, t->to_doc, &targetlist); 414 set_cmd_sfunc (c, open_target); 415 set_cmd_context (c, t); 416 alt = xstrprintf ("target %s", t->to_shortname); 417 deprecate_cmd (c, alt); 418 } 419 420 /* Stub functions */ 421 422 void 423 target_kill (void) 424 { 425 current_target.to_kill (¤t_target); 426 } 427 428 void 429 target_load (const char *arg, int from_tty) 430 { 431 target_dcache_invalidate (); 432 (*current_target.to_load) (¤t_target, arg, from_tty); 433 } 434 435 /* Possible terminal states. */ 436 437 enum terminal_state 438 { 439 /* The inferior's terminal settings are in effect. */ 440 terminal_is_inferior = 0, 441 442 /* Some of our terminal settings are in effect, enough to get 443 proper output. */ 444 terminal_is_ours_for_output = 1, 445 446 /* Our terminal settings are in effect, for output and input. */ 447 terminal_is_ours = 2 448 }; 449 450 static enum terminal_state terminal_state = terminal_is_ours; 451 452 /* See target.h. */ 453 454 void 455 target_terminal_init (void) 456 { 457 (*current_target.to_terminal_init) (¤t_target); 458 459 terminal_state = terminal_is_ours; 460 } 461 462 /* See target.h. 
*/ 463 464 int 465 target_terminal_is_inferior (void) 466 { 467 return (terminal_state == terminal_is_inferior); 468 } 469 470 /* See target.h. */ 471 472 int 473 target_terminal_is_ours (void) 474 { 475 return (terminal_state == terminal_is_ours); 476 } 477 478 /* See target.h. */ 479 480 void 481 target_terminal_inferior (void) 482 { 483 struct ui *ui = current_ui; 484 485 /* A background resume (``run&'') should leave GDB in control of the 486 terminal. */ 487 if (ui->prompt_state != PROMPT_BLOCKED) 488 return; 489 490 /* Since we always run the inferior in the main console (unless "set 491 inferior-tty" is in effect), when some UI other than the main one 492 calls target_terminal_inferior/target_terminal_inferior, then we 493 leave the main UI's terminal settings as is. */ 494 if (ui != main_ui) 495 return; 496 497 if (terminal_state == terminal_is_inferior) 498 return; 499 500 /* If GDB is resuming the inferior in the foreground, install 501 inferior's terminal modes. */ 502 (*current_target.to_terminal_inferior) (¤t_target); 503 terminal_state = terminal_is_inferior; 504 505 /* If the user hit C-c before, pretend that it was hit right 506 here. */ 507 if (check_quit_flag ()) 508 target_pass_ctrlc (); 509 } 510 511 /* See target.h. */ 512 513 void 514 target_terminal_ours (void) 515 { 516 struct ui *ui = current_ui; 517 518 /* See target_terminal_inferior. */ 519 if (ui != main_ui) 520 return; 521 522 if (terminal_state == terminal_is_ours) 523 return; 524 525 (*current_target.to_terminal_ours) (¤t_target); 526 terminal_state = terminal_is_ours; 527 } 528 529 /* See target.h. */ 530 531 void 532 target_terminal_ours_for_output (void) 533 { 534 struct ui *ui = current_ui; 535 536 /* See target_terminal_inferior. */ 537 if (ui != main_ui) 538 return; 539 540 if (terminal_state != terminal_is_inferior) 541 return; 542 (*current_target.to_terminal_ours_for_output) (¤t_target); 543 terminal_state = terminal_is_ours_for_output; 544 } 545 546 /* See target.h. 
*/ 547 548 int 549 target_supports_terminal_ours (void) 550 { 551 struct target_ops *t; 552 553 for (t = current_target.beneath; t != NULL; t = t->beneath) 554 { 555 if (t->to_terminal_ours != delegate_terminal_ours 556 && t->to_terminal_ours != tdefault_terminal_ours) 557 return 1; 558 } 559 560 return 0; 561 } 562 563 /* Restore the terminal to its previous state (helper for 564 make_cleanup_restore_target_terminal). */ 565 566 static void 567 cleanup_restore_target_terminal (void *arg) 568 { 569 enum terminal_state *previous_state = (enum terminal_state *) arg; 570 571 switch (*previous_state) 572 { 573 case terminal_is_ours: 574 target_terminal_ours (); 575 break; 576 case terminal_is_ours_for_output: 577 target_terminal_ours_for_output (); 578 break; 579 case terminal_is_inferior: 580 target_terminal_inferior (); 581 break; 582 } 583 } 584 585 /* See target.h. */ 586 587 struct cleanup * 588 make_cleanup_restore_target_terminal (void) 589 { 590 enum terminal_state *ts = XNEW (enum terminal_state); 591 592 *ts = terminal_state; 593 594 return make_cleanup_dtor (cleanup_restore_target_terminal, ts, xfree); 595 } 596 597 static void 598 tcomplain (void) 599 { 600 error (_("You can't do that when your target is `%s'"), 601 current_target.to_shortname); 602 } 603 604 void 605 noprocess (void) 606 { 607 error (_("You can't do that without a process to debug.")); 608 } 609 610 static void 611 default_terminal_info (struct target_ops *self, const char *args, int from_tty) 612 { 613 printf_unfiltered (_("No saved terminal information.\n")); 614 } 615 616 /* A default implementation for the to_get_ada_task_ptid target method. 617 618 This function builds the PTID by using both LWP and TID as part of 619 the PTID lwp and tid elements. The pid used is the pid of the 620 inferior_ptid. 
*/ 621 622 static ptid_t 623 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid) 624 { 625 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid); 626 } 627 628 static enum exec_direction_kind 629 default_execution_direction (struct target_ops *self) 630 { 631 if (!target_can_execute_reverse) 632 return EXEC_FORWARD; 633 else if (!target_can_async_p ()) 634 return EXEC_FORWARD; 635 else 636 gdb_assert_not_reached ("\ 637 to_execution_direction must be implemented for reverse async"); 638 } 639 640 /* Go through the target stack from top to bottom, copying over zero 641 entries in current_target, then filling in still empty entries. In 642 effect, we are doing class inheritance through the pushed target 643 vectors. 644 645 NOTE: cagney/2003-10-17: The problem with this inheritance, as it 646 is currently implemented, is that it discards any knowledge of 647 which target an inherited method originally belonged to. 648 Consequently, new new target methods should instead explicitly and 649 locally search the target stack for the target that can handle the 650 request. */ 651 652 static void 653 update_current_target (void) 654 { 655 struct target_ops *t; 656 657 /* First, reset current's contents. */ 658 memset (¤t_target, 0, sizeof (current_target)); 659 660 /* Install the delegators. */ 661 install_delegators (¤t_target); 662 663 current_target.to_stratum = target_stack->to_stratum; 664 665 #define INHERIT(FIELD, TARGET) \ 666 if (!current_target.FIELD) \ 667 current_target.FIELD = (TARGET)->FIELD 668 669 /* Do not add any new INHERITs here. Instead, use the delegation 670 mechanism provided by make-target-delegates. 
*/ 671 for (t = target_stack; t; t = t->beneath) 672 { 673 INHERIT (to_shortname, t); 674 INHERIT (to_longname, t); 675 INHERIT (to_attach_no_wait, t); 676 INHERIT (to_have_steppable_watchpoint, t); 677 INHERIT (to_have_continuable_watchpoint, t); 678 INHERIT (to_has_thread_control, t); 679 } 680 #undef INHERIT 681 682 /* Finally, position the target-stack beneath the squashed 683 "current_target". That way code looking for a non-inherited 684 target method can quickly and simply find it. */ 685 current_target.beneath = target_stack; 686 687 if (targetdebug) 688 setup_target_debug (); 689 } 690 691 /* Push a new target type into the stack of the existing target accessors, 692 possibly superseding some of the existing accessors. 693 694 Rather than allow an empty stack, we always have the dummy target at 695 the bottom stratum, so we can call the function vectors without 696 checking them. */ 697 698 void 699 push_target (struct target_ops *t) 700 { 701 struct target_ops **cur; 702 703 /* Check magic number. If wrong, it probably means someone changed 704 the struct definition, but not all the places that initialize one. */ 705 if (t->to_magic != OPS_MAGIC) 706 { 707 fprintf_unfiltered (gdb_stderr, 708 "Magic number of %s target struct wrong\n", 709 t->to_shortname); 710 internal_error (__FILE__, __LINE__, 711 _("failed internal consistency check")); 712 } 713 714 /* Find the proper stratum to install this target in. */ 715 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath) 716 { 717 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum) 718 break; 719 } 720 721 /* If there's already targets at this stratum, remove them. */ 722 /* FIXME: cagney/2003-10-15: I think this should be popping all 723 targets to CUR, and not just those at this stratum level. */ 724 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum) 725 { 726 /* There's already something at this stratum level. Close it, 727 and un-hook it from the stack. 
*/ 728 struct target_ops *tmp = (*cur); 729 730 (*cur) = (*cur)->beneath; 731 tmp->beneath = NULL; 732 target_close (tmp); 733 } 734 735 /* We have removed all targets in our stratum, now add the new one. */ 736 t->beneath = (*cur); 737 (*cur) = t; 738 739 update_current_target (); 740 } 741 742 /* Remove a target_ops vector from the stack, wherever it may be. 743 Return how many times it was removed (0 or 1). */ 744 745 int 746 unpush_target (struct target_ops *t) 747 { 748 struct target_ops **cur; 749 struct target_ops *tmp; 750 751 if (t->to_stratum == dummy_stratum) 752 internal_error (__FILE__, __LINE__, 753 _("Attempt to unpush the dummy target")); 754 755 /* Look for the specified target. Note that we assume that a target 756 can only occur once in the target stack. */ 757 758 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath) 759 { 760 if ((*cur) == t) 761 break; 762 } 763 764 /* If we don't find target_ops, quit. Only open targets should be 765 closed. */ 766 if ((*cur) == NULL) 767 return 0; 768 769 /* Unchain the target. */ 770 tmp = (*cur); 771 (*cur) = (*cur)->beneath; 772 tmp->beneath = NULL; 773 774 update_current_target (); 775 776 /* Finally close the target. Note we do this after unchaining, so 777 any target method calls from within the target_close 778 implementation don't end up in T anymore. */ 779 target_close (t); 780 781 return 1; 782 } 783 784 /* Unpush TARGET and assert that it worked. 
*/ 785 786 static void 787 unpush_target_and_assert (struct target_ops *target) 788 { 789 if (!unpush_target (target)) 790 { 791 fprintf_unfiltered (gdb_stderr, 792 "pop_all_targets couldn't find target %s\n", 793 target->to_shortname); 794 internal_error (__FILE__, __LINE__, 795 _("failed internal consistency check")); 796 } 797 } 798 799 void 800 pop_all_targets_above (enum strata above_stratum) 801 { 802 while ((int) (current_target.to_stratum) > (int) above_stratum) 803 unpush_target_and_assert (target_stack); 804 } 805 806 /* See target.h. */ 807 808 void 809 pop_all_targets_at_and_above (enum strata stratum) 810 { 811 while ((int) (current_target.to_stratum) >= (int) stratum) 812 unpush_target_and_assert (target_stack); 813 } 814 815 void 816 pop_all_targets (void) 817 { 818 pop_all_targets_above (dummy_stratum); 819 } 820 821 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */ 822 823 int 824 target_is_pushed (struct target_ops *t) 825 { 826 struct target_ops *cur; 827 828 /* Check magic number. If wrong, it probably means someone changed 829 the struct definition, but not all the places that initialize one. */ 830 if (t->to_magic != OPS_MAGIC) 831 { 832 fprintf_unfiltered (gdb_stderr, 833 "Magic number of %s target struct wrong\n", 834 t->to_shortname); 835 internal_error (__FILE__, __LINE__, 836 _("failed internal consistency check")); 837 } 838 839 for (cur = target_stack; cur != NULL; cur = cur->beneath) 840 if (cur == t) 841 return 1; 842 843 return 0; 844 } 845 846 /* Default implementation of to_get_thread_local_address. */ 847 848 static void 849 generic_tls_error (void) 850 { 851 throw_error (TLS_GENERIC_ERROR, 852 _("Cannot find thread-local variables on this target")); 853 } 854 855 /* Using the objfile specified in OBJFILE, find the address for the 856 current thread's thread-local storage with offset OFFSET. 
*/ 857 CORE_ADDR 858 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset) 859 { 860 volatile CORE_ADDR addr = 0; 861 struct target_ops *target = ¤t_target; 862 863 if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ())) 864 { 865 ptid_t ptid = inferior_ptid; 866 867 TRY 868 { 869 CORE_ADDR lm_addr; 870 871 /* Fetch the load module address for this objfile. */ 872 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (), 873 objfile); 874 875 addr = target->to_get_thread_local_address (target, ptid, 876 lm_addr, offset); 877 } 878 /* If an error occurred, print TLS related messages here. Otherwise, 879 throw the error to some higher catcher. */ 880 CATCH (ex, RETURN_MASK_ALL) 881 { 882 int objfile_is_library = (objfile->flags & OBJF_SHARED); 883 884 switch (ex.error) 885 { 886 case TLS_NO_LIBRARY_SUPPORT_ERROR: 887 error (_("Cannot find thread-local variables " 888 "in this thread library.")); 889 break; 890 case TLS_LOAD_MODULE_NOT_FOUND_ERROR: 891 if (objfile_is_library) 892 error (_("Cannot find shared library `%s' in dynamic" 893 " linker's load module list"), objfile_name (objfile)); 894 else 895 error (_("Cannot find executable file `%s' in dynamic" 896 " linker's load module list"), objfile_name (objfile)); 897 break; 898 case TLS_NOT_ALLOCATED_YET_ERROR: 899 if (objfile_is_library) 900 error (_("The inferior has not yet allocated storage for" 901 " thread-local variables in\n" 902 "the shared library `%s'\n" 903 "for %s"), 904 objfile_name (objfile), target_pid_to_str (ptid)); 905 else 906 error (_("The inferior has not yet allocated storage for" 907 " thread-local variables in\n" 908 "the executable `%s'\n" 909 "for %s"), 910 objfile_name (objfile), target_pid_to_str (ptid)); 911 break; 912 case TLS_GENERIC_ERROR: 913 if (objfile_is_library) 914 error (_("Cannot find thread-local storage for %s, " 915 "shared library %s:\n%s"), 916 target_pid_to_str (ptid), 917 objfile_name (objfile), ex.message); 918 else 919 error 
(_("Cannot find thread-local storage for %s, " 920 "executable file %s:\n%s"), 921 target_pid_to_str (ptid), 922 objfile_name (objfile), ex.message); 923 break; 924 default: 925 throw_exception (ex); 926 break; 927 } 928 } 929 END_CATCH 930 } 931 /* It wouldn't be wrong here to try a gdbarch method, too; finding 932 TLS is an ABI-specific thing. But we don't do that yet. */ 933 else 934 error (_("Cannot find thread-local variables on this target")); 935 936 return addr; 937 } 938 939 const char * 940 target_xfer_status_to_string (enum target_xfer_status status) 941 { 942 #define CASE(X) case X: return #X 943 switch (status) 944 { 945 CASE(TARGET_XFER_E_IO); 946 CASE(TARGET_XFER_UNAVAILABLE); 947 default: 948 return "<unknown>"; 949 } 950 #undef CASE 951 }; 952 953 954 #undef MIN 955 #define MIN(A, B) (((A) <= (B)) ? (A) : (B)) 956 957 /* target_read_string -- read a null terminated string, up to LEN bytes, 958 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful. 959 Set *STRING to a pointer to malloc'd memory containing the data; the caller 960 is responsible for freeing it. Return the number of bytes successfully 961 read. */ 962 963 int 964 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop) 965 { 966 int tlen, offset, i; 967 gdb_byte buf[4]; 968 int errcode = 0; 969 char *buffer; 970 int buffer_allocated; 971 char *bufptr; 972 unsigned int nbytes_read = 0; 973 974 gdb_assert (string); 975 976 /* Small for testing. */ 977 buffer_allocated = 4; 978 buffer = (char *) xmalloc (buffer_allocated); 979 bufptr = buffer; 980 981 while (len > 0) 982 { 983 tlen = MIN (len, 4 - (memaddr & 3)); 984 offset = memaddr & 3; 985 986 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf); 987 if (errcode != 0) 988 { 989 /* The transfer request might have crossed the boundary to an 990 unallocated region of memory. Retry the transfer, requesting 991 a single byte. 
*/ 992 tlen = 1; 993 offset = 0; 994 errcode = target_read_memory (memaddr, buf, 1); 995 if (errcode != 0) 996 goto done; 997 } 998 999 if (bufptr - buffer + tlen > buffer_allocated) 1000 { 1001 unsigned int bytes; 1002 1003 bytes = bufptr - buffer; 1004 buffer_allocated *= 2; 1005 buffer = (char *) xrealloc (buffer, buffer_allocated); 1006 bufptr = buffer + bytes; 1007 } 1008 1009 for (i = 0; i < tlen; i++) 1010 { 1011 *bufptr++ = buf[i + offset]; 1012 if (buf[i + offset] == '\000') 1013 { 1014 nbytes_read += i + 1; 1015 goto done; 1016 } 1017 } 1018 1019 memaddr += tlen; 1020 len -= tlen; 1021 nbytes_read += tlen; 1022 } 1023 done: 1024 *string = buffer; 1025 if (errnop != NULL) 1026 *errnop = errcode; 1027 return nbytes_read; 1028 } 1029 1030 struct target_section_table * 1031 target_get_section_table (struct target_ops *target) 1032 { 1033 return (*target->to_get_section_table) (target); 1034 } 1035 1036 /* Find a section containing ADDR. */ 1037 1038 struct target_section * 1039 target_section_by_addr (struct target_ops *target, CORE_ADDR addr) 1040 { 1041 struct target_section_table *table = target_get_section_table (target); 1042 struct target_section *secp; 1043 1044 if (table == NULL) 1045 return NULL; 1046 1047 for (secp = table->sections; secp < table->sections_end; secp++) 1048 { 1049 if (addr >= secp->addr && addr < secp->endaddr) 1050 return secp; 1051 } 1052 return NULL; 1053 } 1054 1055 1056 /* Helper for the memory xfer routines. Checks the attributes of the 1057 memory region of MEMADDR against the read or write being attempted. 1058 If the access is permitted returns true, otherwise returns false. 1059 REGION_P is an optional output parameter. If not-NULL, it is 1060 filled with a pointer to the memory region of MEMADDR. REG_LEN 1061 returns LEN trimmed to the end of the region. This is how much the 1062 caller can continue requesting, if the access is permitted. A 1063 single xfer request must not straddle memory region boundaries. 
*/ 1064 1065 static int 1066 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf, 1067 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len, 1068 struct mem_region **region_p) 1069 { 1070 struct mem_region *region; 1071 1072 region = lookup_mem_region (memaddr); 1073 1074 if (region_p != NULL) 1075 *region_p = region; 1076 1077 switch (region->attrib.mode) 1078 { 1079 case MEM_RO: 1080 if (writebuf != NULL) 1081 return 0; 1082 break; 1083 1084 case MEM_WO: 1085 if (readbuf != NULL) 1086 return 0; 1087 break; 1088 1089 case MEM_FLASH: 1090 /* We only support writing to flash during "load" for now. */ 1091 if (writebuf != NULL) 1092 error (_("Writing to flash memory forbidden in this context")); 1093 break; 1094 1095 case MEM_NONE: 1096 return 0; 1097 } 1098 1099 /* region->hi == 0 means there's no upper bound. */ 1100 if (memaddr + len < region->hi || region->hi == 0) 1101 *reg_len = len; 1102 else 1103 *reg_len = region->hi - memaddr; 1104 1105 return 1; 1106 } 1107 1108 /* Read memory from more than one valid target. A core file, for 1109 instance, could have some of memory but delegate other bits to 1110 the target below it. So, we must manually try all targets. */ 1111 1112 enum target_xfer_status 1113 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf, 1114 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len, 1115 ULONGEST *xfered_len) 1116 { 1117 enum target_xfer_status res; 1118 1119 do 1120 { 1121 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL, 1122 readbuf, writebuf, memaddr, len, 1123 xfered_len); 1124 if (res == TARGET_XFER_OK) 1125 break; 1126 1127 /* Stop if the target reports that the memory is not available. */ 1128 if (res == TARGET_XFER_UNAVAILABLE) 1129 break; 1130 1131 /* We want to continue past core files to executables, but not 1132 past a running target's memory. 
*/ 1133 if (ops->to_has_all_memory (ops)) 1134 break; 1135 1136 ops = ops->beneath; 1137 } 1138 while (ops != NULL); 1139 1140 /* The cache works at the raw memory level. Make sure the cache 1141 gets updated with raw contents no matter what kind of memory 1142 object was originally being written. Note we do write-through 1143 first, so that if it fails, we don't write to the cache contents 1144 that never made it to the target. */ 1145 if (writebuf != NULL 1146 && !ptid_equal (inferior_ptid, null_ptid) 1147 && target_dcache_init_p () 1148 && (stack_cache_enabled_p () || code_cache_enabled_p ())) 1149 { 1150 DCACHE *dcache = target_dcache_get (); 1151 1152 /* Note that writing to an area of memory which wasn't present 1153 in the cache doesn't cause it to be loaded in. */ 1154 dcache_update (dcache, res, memaddr, writebuf, *xfered_len); 1155 } 1156 1157 return res; 1158 } 1159 1160 /* Perform a partial memory transfer. 1161 For docs see target.h, to_xfer_partial. */ 1162 1163 static enum target_xfer_status 1164 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object, 1165 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr, 1166 ULONGEST len, ULONGEST *xfered_len) 1167 { 1168 enum target_xfer_status res; 1169 ULONGEST reg_len; 1170 struct mem_region *region; 1171 struct inferior *inf; 1172 1173 /* For accesses to unmapped overlay sections, read directly from 1174 files. Must do this first, as MEMADDR may need adjustment. 
 */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Redirect the access to the overlay's mapped address and
	     satisfy it from the section table.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* Try GDB's internal data cache.  */

  if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
				 &region))
    return TARGET_XFER_E_IO;

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_ptid (inferior_ptid);
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.
 */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();

      /* Satisfy the read from the dcache, limited to the length that
	 fits inside the memory region (REG_LEN).  */
      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
					 reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}

/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.
 */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
				   xfered_len);

      /* Replace any breakpoint instructions in the read data with the
	 saved shadow contents, unless the user asked to see them.  */
      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
	breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      gdb_byte *buf;
      struct cleanup *old_chain;

      /* A large write request is likely to be partially satisfied
	 by memory_xfer_partial_1.  We will continually malloc
	 and free a copy of the entire write request for breakpoint
	 shadow handling even though we only end up writing a small
	 subset of it.  Cap writes to a limit specified by the target
	 to mitigate this.  */
      len = std::min (ops->to_get_memory_xfer_limit (ops), len);

      buf = (gdb_byte *) xmalloc (len);
      old_chain = make_cleanup (xfree, buf);
      memcpy (buf, writebuf, len);

      /* Splice breakpoint instructions into the outgoing copy so the
	 inferior's code stream stays consistent.  */
      breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
				   xfered_len);

      do_cleanups (old_chain);
    }

  return res;
}

/* Cleanup callback that restores a saved value of
   show_memory_breakpoints.  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}

/* Temporarily set show_memory_breakpoints to SHOW; the returned
   cleanup restores the previous value.  */

struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  return make_cleanup (restore_show_memory_breakpoints,
		       (void *) (uintptr_t) current);
}

/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.
 */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Skip/avoid accessing the target if the memory region
	 attributes block the access.  Check this here instead of in
	 raw_memory_xfer_partial as otherwise we'd end up checking
	 this twice in the case of the memory_xfer_partial path is
	 taken; once before checking the dcache, and another in the
	 tail call to raw_memory_xfer_partial.  */
      if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
				     NULL))
	return TARGET_XFER_E_IO;

      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  /* With "set debug target" on, log every transfer (and its data on
     level >= 2) to gdb_stdlog.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ?
annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Break the hex dump at 16-byte-aligned host addresses;
		 with debug level < 2 stop after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}

/* Read LEN bytes of target memory at address MEMADDR, placing the
   results in GDB's memory at MYADDR.  Returns either 0 for success or
   -1 if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transfered anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* See target/target.h.  */

int
target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
{
  gdb_byte buf[4];
  int r;

  r = target_read_memory (memaddr, buf, sizeof buf);
  if (r != 0)
    return r;
  /* Decode using the target's byte order.  */
  *result = extract_unsigned_integer (buf, sizeof buf,
				      gdbarch_byte_order (target_gdbarch ()));
  return 0;
}

/* Like target_read_memory, but specify explicitly that this is a read
   from the target's raw memory.  That is, this read bypasses the
   dcache, breakpoint shadowing, etc.  */

int
target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's code.  This may trigger different cache behavior.
 */

int
target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
		   myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or -1 if any error occurs.  If an
   error occurs, no guarantee is made about how much data got written.
   Callers that can deal with partial writes should call
   target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
		    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or -1 if any error occurs.
   If an error occurs, no guarantee is made about how much data got
   written.  Callers that can deal with partial writes should call
   target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
		    myaddr, memaddr, len) == len)
    return 0;
  else
    return -1;
}

/* Fetch the target's memory map.
 */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;

  result = current_target.to_memory_map (&current_target);
  if (result == NULL)
    return NULL;

  /* Sort the regions (presumably by start address -- mem_region_cmp
     is defined elsewhere; confirm) so the overlap check below only
     needs to compare neighbors.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}

/* Ask the target to erase LENGTH bytes of flash starting at ADDRESS.  */

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  current_target.to_flash_erase (&current_target, address, length);
}

/* Tell the target a sequence of flash operations has finished.  */

void
target_flash_done (void)
{
  current_target.to_flash_done (&current_target);
}

/* "show" callback for the "trust-readonly-sections" setting.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}

/* Target vector read/write partial wrapper functions.
 */

/* Read-only convenience wrapper around target_xfer_partial.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}

/* Write-only convenience wrapper around target_xfer_partial.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}

/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
	     enum target_object object,
	     const char *annex, gdb_byte *buf,
	     ULONGEST offset, LONGEST len)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are reading from a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());

  /* Keep issuing partial reads until the whole request is satisfied,
     EOF is reached, or an error occurs.  */
  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
				    buf + xfered_total * unit_size,
				    offset + xfered_total, len - xfered_total,
				    &xfered_partial);

      /* Call an observer, notifying them of the xfer progress?
 */
      if (status == TARGET_XFER_EOF)
	return xfered_total;
      else if (status == TARGET_XFER_OK)
	{
	  xfered_total += xfered_partial;
	  QUIT;
	}
      else
	return TARGET_XFER_E_IO;

    }
  return len;
}

/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   const ULONGEST begin, const ULONGEST end,
			   int unit_size,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = (gdb_byte *) xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.
 */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end - begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable: give up.  */
      xfree (buf);
      return;
    }

  /* Binary-search for the boundary of the readable subrange.

     Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin) * unit_size,
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.
	     Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it out of
	 the scratch buffer into a right-sized block.  */
      LONGEST region_len = end - current_end;

      r.data = (gdb_byte *) xmalloc (region_len * unit_size);
      memcpy (r.data, buf + (current_end - begin) * unit_size,
	      region_len * unit_size);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}

/* Cleanup callback freeing a VEC of memory_read_result_s, including
   each element's data buffer.  */

void
free_memory_read_result_vector (void *x)
{
  VEC(memory_read_result_s) **v = (VEC(memory_read_result_s) **) x;
  memory_read_result_s *current;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, *v, ix, current); ++ix)
    {
      xfree (current->data);
    }
  VEC_free (memory_read_result_s, *v);
}

/* Read [OFFSET, OFFSET+LEN) from the target, collecting whatever
   subranges are actually readable into the returned VEC.  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops,
		    const ULONGEST offset, const LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;
  int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
  struct cleanup *cleanup = make_cleanup (free_memory_read_result_vector,
					  &result);

  LONGEST xfered_total = 0;
  while (xfered_total < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered_total);
      LONGEST region_len;

      /* If there is no explicit region, a fake one should be created.
 */
      gdb_assert (region);

      if (region->hi == 0)
	region_len = len - xfered_total;
      else
	/* NOTE(review): this measures from OFFSET rather than the
	   current position (OFFSET + XFERED_TOTAL); confirm this is
	   intended when the request spans multiple regions.  */
	region_len = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered_total += region_len;
	}
      else
	{
	  LONGEST to_read = std::min (len - xfered_total, region_len);
	  gdb_byte *buffer = (gdb_byte *) xmalloc (to_read * unit_size);
	  struct cleanup *inner_cleanup = make_cleanup (xfree, buffer);

	  LONGEST xfered_partial =
	    target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			 (gdb_byte *) buffer,
			 offset + xfered_total, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfered_partial <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      do_cleanups (inner_cleanup);
	      read_whatever_is_readable (ops, offset + xfered_total,
					 offset + xfered_total + to_read,
					 unit_size, &result);
	      xfered_total += to_read;
	    }
	  else
	    {
	      struct memory_read_result r;

	      /* Ownership of BUFFER transfers to the result vector.  */
	      discard_cleanups (inner_cleanup);
	      r.data = buffer;
	      r.begin = offset + xfered_total;
	      r.end = r.begin + xfered_partial;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered_total += xfered_partial;
	    }
	  QUIT;
	}
    }

  discard_cleanups (cleanup);
  return result;
}


/* An alternative to target_write with progress callbacks.
 */

LONGEST
target_write_with_progress (struct target_ops *ops,
			    enum target_object object,
			    const char *annex, const gdb_byte *buf,
			    ULONGEST offset, LONGEST len,
			    void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered_total = 0;
  int unit_size = 1;

  /* If we are writing to a memory object, find the length of an addressable
     unit for that architecture.  */
  if (object == TARGET_OBJECT_MEMORY
      || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY
      || object == TARGET_OBJECT_RAW_MEMORY)
    unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  /* Keep issuing partial writes until the whole request is done.  */
  while (xfered_total < len)
    {
      ULONGEST xfered_partial;
      enum target_xfer_status status;

      status = target_write_partial (ops, object, annex,
				     buf + xfered_total * unit_size,
				     offset + xfered_total, len - xfered_total,
				     &xfered_partial);

      if (status != TARGET_XFER_OK)
	return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;

      if (progress)
	(*progress) (xfered_partial, baton);

      xfered_total += xfered_partial;
      QUIT;
    }
  return len;
}

/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}

/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.
 */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = (gdb_byte *) xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Reserve PADDING bytes at the end of the buffer for the
	 caller (e.g. for a trailing NUL).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = (gdb_byte *) xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}

/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.
 */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}

/* Read OBJECT/ANNEX using OPS.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Ask for one byte of padding so the NUL terminator always fits.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}

/* Memory transfer methods.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).
 */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}

/* Read an unsigned LEN-byte integer from ADDR using OPS, decoded with
   BYTE_ORDER.  LEN must fit in a ULONGEST.  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}

/* See target.h.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* Honor the "may-insert-breakpoints" user setting; nonzero return
     signals failure to the caller.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}

/* See target.h.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt,
			  enum remove_bp_reason reason)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.
 */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
					      gdbarch, bp_tgt, reason);
}

/* Print the symbol file in use and, for each target on the stack that
   provides memory, that target's description.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
		       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      if (!(*t->to_has_memory) (t))
	continue;

      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* If a target above provided all memory, targets below it are
	 not consulted for memory while running.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}

/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.
*/ 2189 if (!gdbarch_has_global_solist (target_gdbarch ())) 2190 { 2191 no_shared_libraries (NULL, from_tty); 2192 2193 invalidate_target_mem_regions (); 2194 2195 target_clear_description (); 2196 } 2197 2198 /* attach_flag may be set if the previous process associated with 2199 the inferior was attached to. */ 2200 current_inferior ()->attach_flag = 0; 2201 2202 current_inferior ()->highest_thread_num = 0; 2203 2204 agent_capability_invalidate (); 2205 } 2206 2207 /* Callback for iterate_over_inferiors. Gets rid of the given 2208 inferior. */ 2209 2210 static int 2211 dispose_inferior (struct inferior *inf, void *args) 2212 { 2213 struct thread_info *thread; 2214 2215 thread = any_thread_of_process (inf->pid); 2216 if (thread) 2217 { 2218 switch_to_thread (thread->ptid); 2219 2220 /* Core inferiors actually should be detached, not killed. */ 2221 if (target_has_execution) 2222 target_kill (); 2223 else 2224 target_detach (NULL, 0); 2225 } 2226 2227 return 0; 2228 } 2229 2230 /* This is to be called by the open routine before it does 2231 anything. */ 2232 2233 void 2234 target_preopen (int from_tty) 2235 { 2236 dont_repeat (); 2237 2238 if (have_inferiors ()) 2239 { 2240 if (!from_tty 2241 || !have_live_inferiors () 2242 || query (_("A program is being debugged already. Kill it? "))) 2243 iterate_over_inferiors (dispose_inferior, NULL); 2244 else 2245 error (_("Program not killed.")); 2246 } 2247 2248 /* Calling target_kill may remove the target from the stack. But if 2249 it doesn't (which seems like a win for UDI), remove it now. */ 2250 /* Leave the exec target, though. The user may be switching from a 2251 live process to a core of the same program. */ 2252 pop_all_targets_above (file_stratum); 2253 2254 target_pre_inferior (from_tty); 2255 } 2256 2257 /* Detach a target after doing deferred register stores. 
*/ 2258 2259 void 2260 target_detach (const char *args, int from_tty) 2261 { 2262 if (gdbarch_has_global_breakpoints (target_gdbarch ())) 2263 /* Don't remove global breakpoints here. They're removed on 2264 disconnection from the target. */ 2265 ; 2266 else 2267 /* If we're in breakpoints-always-inserted mode, have to remove 2268 them before detaching. */ 2269 remove_breakpoints_pid (ptid_get_pid (inferior_ptid)); 2270 2271 prepare_for_detach (); 2272 2273 current_target.to_detach (¤t_target, args, from_tty); 2274 } 2275 2276 void 2277 target_disconnect (const char *args, int from_tty) 2278 { 2279 /* If we're in breakpoints-always-inserted mode or if breakpoints 2280 are global across processes, we have to remove them before 2281 disconnecting. */ 2282 remove_breakpoints (); 2283 2284 current_target.to_disconnect (¤t_target, args, from_tty); 2285 } 2286 2287 /* See target/target.h. */ 2288 2289 ptid_t 2290 target_wait (ptid_t ptid, struct target_waitstatus *status, int options) 2291 { 2292 return (current_target.to_wait) (¤t_target, ptid, status, options); 2293 } 2294 2295 /* See target.h. */ 2296 2297 ptid_t 2298 default_target_wait (struct target_ops *ops, 2299 ptid_t ptid, struct target_waitstatus *status, 2300 int options) 2301 { 2302 status->kind = TARGET_WAITKIND_IGNORE; 2303 return minus_one_ptid; 2304 } 2305 2306 const char * 2307 target_pid_to_str (ptid_t ptid) 2308 { 2309 return (*current_target.to_pid_to_str) (¤t_target, ptid); 2310 } 2311 2312 const char * 2313 target_thread_name (struct thread_info *info) 2314 { 2315 return current_target.to_thread_name (¤t_target, info); 2316 } 2317 2318 void 2319 target_resume (ptid_t ptid, int step, enum gdb_signal signal) 2320 { 2321 target_dcache_invalidate (); 2322 2323 current_target.to_resume (¤t_target, ptid, step, signal); 2324 2325 registers_changed_ptid (ptid); 2326 /* We only set the internal executing state here. The user/frontend 2327 running state is set at a higher level. 
*/ 2328 set_executing (ptid, 1); 2329 clear_inline_frame_state (ptid); 2330 } 2331 2332 /* If true, target_commit_resume is a nop. */ 2333 static int defer_target_commit_resume; 2334 2335 /* See target.h. */ 2336 2337 void 2338 target_commit_resume (void) 2339 { 2340 struct target_ops *t; 2341 2342 if (defer_target_commit_resume) 2343 return; 2344 2345 current_target.to_commit_resume (¤t_target); 2346 } 2347 2348 /* See target.h. */ 2349 2350 struct cleanup * 2351 make_cleanup_defer_target_commit_resume (void) 2352 { 2353 struct cleanup *old_chain; 2354 2355 old_chain = make_cleanup_restore_integer (&defer_target_commit_resume); 2356 defer_target_commit_resume = 1; 2357 return old_chain; 2358 } 2359 2360 void 2361 target_pass_signals (int numsigs, unsigned char *pass_signals) 2362 { 2363 (*current_target.to_pass_signals) (¤t_target, numsigs, pass_signals); 2364 } 2365 2366 void 2367 target_program_signals (int numsigs, unsigned char *program_signals) 2368 { 2369 (*current_target.to_program_signals) (¤t_target, 2370 numsigs, program_signals); 2371 } 2372 2373 static int 2374 default_follow_fork (struct target_ops *self, int follow_child, 2375 int detach_fork) 2376 { 2377 /* Some target returned a fork event, but did not know how to follow it. */ 2378 internal_error (__FILE__, __LINE__, 2379 _("could not find a target to follow fork")); 2380 } 2381 2382 /* Look through the list of possible targets for a target that can 2383 follow forks. */ 2384 2385 int 2386 target_follow_fork (int follow_child, int detach_fork) 2387 { 2388 return current_target.to_follow_fork (¤t_target, 2389 follow_child, detach_fork); 2390 } 2391 2392 /* Target wrapper for follow exec hook. 
*/ 2393 2394 void 2395 target_follow_exec (struct inferior *inf, char *execd_pathname) 2396 { 2397 current_target.to_follow_exec (¤t_target, inf, execd_pathname); 2398 } 2399 2400 static void 2401 default_mourn_inferior (struct target_ops *self) 2402 { 2403 internal_error (__FILE__, __LINE__, 2404 _("could not find a target to follow mourn inferior")); 2405 } 2406 2407 void 2408 target_mourn_inferior (ptid_t ptid) 2409 { 2410 gdb_assert (ptid_equal (ptid, inferior_ptid)); 2411 current_target.to_mourn_inferior (¤t_target); 2412 2413 /* We no longer need to keep handles on any of the object files. 2414 Make sure to release them to avoid unnecessarily locking any 2415 of them while we're not actually debugging. */ 2416 bfd_cache_close_all (); 2417 } 2418 2419 /* Look for a target which can describe architectural features, starting 2420 from TARGET. If we find one, return its description. */ 2421 2422 const struct target_desc * 2423 target_read_description (struct target_ops *target) 2424 { 2425 return target->to_read_description (target); 2426 } 2427 2428 /* This implements a basic search of memory, reading target memory and 2429 performing the search here (as opposed to performing the search in on the 2430 target side with, for example, gdbserver). */ 2431 2432 int 2433 simple_search_memory (struct target_ops *ops, 2434 CORE_ADDR start_addr, ULONGEST search_space_len, 2435 const gdb_byte *pattern, ULONGEST pattern_len, 2436 CORE_ADDR *found_addrp) 2437 { 2438 /* NOTE: also defined in find.c testcase. */ 2439 #define SEARCH_CHUNK_SIZE 16000 2440 const unsigned chunk_size = SEARCH_CHUNK_SIZE; 2441 /* Buffer to hold memory contents for searching. */ 2442 gdb_byte *search_buf; 2443 unsigned search_buf_size; 2444 struct cleanup *old_cleanups; 2445 2446 search_buf_size = chunk_size + pattern_len - 1; 2447 2448 /* No point in trying to allocate a buffer larger than the search space. 
*/ 2449 if (search_space_len < search_buf_size) 2450 search_buf_size = search_space_len; 2451 2452 search_buf = (gdb_byte *) malloc (search_buf_size); 2453 if (search_buf == NULL) 2454 error (_("Unable to allocate memory to perform the search.")); 2455 old_cleanups = make_cleanup (free_current_contents, &search_buf); 2456 2457 /* Prime the search buffer. */ 2458 2459 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, 2460 search_buf, start_addr, search_buf_size) != search_buf_size) 2461 { 2462 warning (_("Unable to access %s bytes of target " 2463 "memory at %s, halting search."), 2464 pulongest (search_buf_size), hex_string (start_addr)); 2465 do_cleanups (old_cleanups); 2466 return -1; 2467 } 2468 2469 /* Perform the search. 2470 2471 The loop is kept simple by allocating [N + pattern-length - 1] bytes. 2472 When we've scanned N bytes we copy the trailing bytes to the start and 2473 read in another N bytes. */ 2474 2475 while (search_space_len >= pattern_len) 2476 { 2477 gdb_byte *found_ptr; 2478 unsigned nr_search_bytes 2479 = std::min (search_space_len, (ULONGEST) search_buf_size); 2480 2481 found_ptr = (gdb_byte *) memmem (search_buf, nr_search_bytes, 2482 pattern, pattern_len); 2483 2484 if (found_ptr != NULL) 2485 { 2486 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf); 2487 2488 *found_addrp = found_addr; 2489 do_cleanups (old_cleanups); 2490 return 1; 2491 } 2492 2493 /* Not found in this chunk, skip to next chunk. */ 2494 2495 /* Don't let search_space_len wrap here, it's unsigned. */ 2496 if (search_space_len >= chunk_size) 2497 search_space_len -= chunk_size; 2498 else 2499 search_space_len = 0; 2500 2501 if (search_space_len >= pattern_len) 2502 { 2503 unsigned keep_len = search_buf_size - chunk_size; 2504 CORE_ADDR read_addr = start_addr + chunk_size + keep_len; 2505 int nr_to_read; 2506 2507 /* Copy the trailing part of the previous iteration to the front 2508 of the buffer for the next iteration. 
*/ 2509 gdb_assert (keep_len == pattern_len - 1); 2510 memcpy (search_buf, search_buf + chunk_size, keep_len); 2511 2512 nr_to_read = std::min (search_space_len - keep_len, 2513 (ULONGEST) chunk_size); 2514 2515 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, 2516 search_buf + keep_len, read_addr, 2517 nr_to_read) != nr_to_read) 2518 { 2519 warning (_("Unable to access %s bytes of target " 2520 "memory at %s, halting search."), 2521 plongest (nr_to_read), 2522 hex_string (read_addr)); 2523 do_cleanups (old_cleanups); 2524 return -1; 2525 } 2526 2527 start_addr += chunk_size; 2528 } 2529 } 2530 2531 /* Not found. */ 2532 2533 do_cleanups (old_cleanups); 2534 return 0; 2535 } 2536 2537 /* Default implementation of memory-searching. */ 2538 2539 static int 2540 default_search_memory (struct target_ops *self, 2541 CORE_ADDR start_addr, ULONGEST search_space_len, 2542 const gdb_byte *pattern, ULONGEST pattern_len, 2543 CORE_ADDR *found_addrp) 2544 { 2545 /* Start over from the top of the target stack. */ 2546 return simple_search_memory (current_target.beneath, 2547 start_addr, search_space_len, 2548 pattern, pattern_len, found_addrp); 2549 } 2550 2551 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the 2552 sequence of bytes in PATTERN with length PATTERN_LEN. 2553 2554 The result is 1 if found, 0 if not found, and -1 if there was an error 2555 requiring halting of the search (e.g. memory read error). 2556 If the pattern is found the address is recorded in FOUND_ADDRP. */ 2557 2558 int 2559 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len, 2560 const gdb_byte *pattern, ULONGEST pattern_len, 2561 CORE_ADDR *found_addrp) 2562 { 2563 return current_target.to_search_memory (¤t_target, start_addr, 2564 search_space_len, 2565 pattern, pattern_len, found_addrp); 2566 } 2567 2568 /* Look through the currently pushed targets. If none of them will 2569 be able to restart the currently running process, issue an error 2570 message. 
*/ 2571 2572 void 2573 target_require_runnable (void) 2574 { 2575 struct target_ops *t; 2576 2577 for (t = target_stack; t != NULL; t = t->beneath) 2578 { 2579 /* If this target knows how to create a new program, then 2580 assume we will still be able to after killing the current 2581 one. Either killing and mourning will not pop T, or else 2582 find_default_run_target will find it again. */ 2583 if (t->to_create_inferior != NULL) 2584 return; 2585 2586 /* Do not worry about targets at certain strata that can not 2587 create inferiors. Assume they will be pushed again if 2588 necessary, and continue to the process_stratum. */ 2589 if (t->to_stratum == thread_stratum 2590 || t->to_stratum == record_stratum 2591 || t->to_stratum == arch_stratum) 2592 continue; 2593 2594 error (_("The \"%s\" target does not support \"run\". " 2595 "Try \"help target\" or \"continue\"."), 2596 t->to_shortname); 2597 } 2598 2599 /* This function is only called if the target is running. In that 2600 case there should have been a process_stratum target and it 2601 should either know how to create inferiors, or not... */ 2602 internal_error (__FILE__, __LINE__, _("No targets found")); 2603 } 2604 2605 /* Whether GDB is allowed to fall back to the default run target for 2606 "run", "attach", etc. when no target is connected yet. */ 2607 static int auto_connect_native_target = 1; 2608 2609 static void 2610 show_auto_connect_native_target (struct ui_file *file, int from_tty, 2611 struct cmd_list_element *c, const char *value) 2612 { 2613 fprintf_filtered (file, 2614 _("Whether GDB may automatically connect to the " 2615 "native target is %s.\n"), 2616 value); 2617 } 2618 2619 /* Look through the list of possible targets for a target that can 2620 execute a run or attach command without any other data. This is 2621 used to locate the default process stratum. 2622 2623 If DO_MESG is not NULL, the result is always valid (error() is 2624 called for errors); else, return NULL on error. 
*/ 2625 2626 static struct target_ops * 2627 find_default_run_target (const char *do_mesg) 2628 { 2629 struct target_ops *runable = NULL; 2630 2631 if (auto_connect_native_target) 2632 { 2633 struct target_ops *t; 2634 int count = 0; 2635 int i; 2636 2637 for (i = 0; VEC_iterate (target_ops_p, target_structs, i, t); ++i) 2638 { 2639 if (t->to_can_run != delegate_can_run && target_can_run (t)) 2640 { 2641 runable = t; 2642 ++count; 2643 } 2644 } 2645 2646 if (count != 1) 2647 runable = NULL; 2648 } 2649 2650 if (runable == NULL) 2651 { 2652 if (do_mesg) 2653 error (_("Don't know how to %s. Try \"help target\"."), do_mesg); 2654 else 2655 return NULL; 2656 } 2657 2658 return runable; 2659 } 2660 2661 /* See target.h. */ 2662 2663 struct target_ops * 2664 find_attach_target (void) 2665 { 2666 struct target_ops *t; 2667 2668 /* If a target on the current stack can attach, use it. */ 2669 for (t = current_target.beneath; t != NULL; t = t->beneath) 2670 { 2671 if (t->to_attach != NULL) 2672 break; 2673 } 2674 2675 /* Otherwise, use the default run target for attaching. */ 2676 if (t == NULL) 2677 t = find_default_run_target ("attach"); 2678 2679 return t; 2680 } 2681 2682 /* See target.h. */ 2683 2684 struct target_ops * 2685 find_run_target (void) 2686 { 2687 struct target_ops *t; 2688 2689 /* If a target on the current stack can attach, use it. */ 2690 for (t = current_target.beneath; t != NULL; t = t->beneath) 2691 { 2692 if (t->to_create_inferior != NULL) 2693 break; 2694 } 2695 2696 /* Otherwise, use the default run target. */ 2697 if (t == NULL) 2698 t = find_default_run_target ("run"); 2699 2700 return t; 2701 } 2702 2703 /* Implement the "info proc" command. */ 2704 2705 int 2706 target_info_proc (const char *args, enum info_proc_what what) 2707 { 2708 struct target_ops *t; 2709 2710 /* If we're already connected to something that can get us OS 2711 related data, use it. Otherwise, try using the native 2712 target. 
*/ 2713 if (current_target.to_stratum >= process_stratum) 2714 t = current_target.beneath; 2715 else 2716 t = find_default_run_target (NULL); 2717 2718 for (; t != NULL; t = t->beneath) 2719 { 2720 if (t->to_info_proc != NULL) 2721 { 2722 t->to_info_proc (t, args, what); 2723 2724 if (targetdebug) 2725 fprintf_unfiltered (gdb_stdlog, 2726 "target_info_proc (\"%s\", %d)\n", args, what); 2727 2728 return 1; 2729 } 2730 } 2731 2732 return 0; 2733 } 2734 2735 static int 2736 find_default_supports_disable_randomization (struct target_ops *self) 2737 { 2738 struct target_ops *t; 2739 2740 t = find_default_run_target (NULL); 2741 if (t && t->to_supports_disable_randomization) 2742 return (t->to_supports_disable_randomization) (t); 2743 return 0; 2744 } 2745 2746 int 2747 target_supports_disable_randomization (void) 2748 { 2749 struct target_ops *t; 2750 2751 for (t = ¤t_target; t != NULL; t = t->beneath) 2752 if (t->to_supports_disable_randomization) 2753 return t->to_supports_disable_randomization (t); 2754 2755 return 0; 2756 } 2757 2758 /* See target/target.h. */ 2759 2760 int 2761 target_supports_multi_process (void) 2762 { 2763 return (*current_target.to_supports_multi_process) (¤t_target); 2764 } 2765 2766 char * 2767 target_get_osdata (const char *type) 2768 { 2769 struct target_ops *t; 2770 2771 /* If we're already connected to something that can get us OS 2772 related data, use it. Otherwise, try using the native 2773 target. */ 2774 if (current_target.to_stratum >= process_stratum) 2775 t = current_target.beneath; 2776 else 2777 t = find_default_run_target ("get OS data"); 2778 2779 if (!t) 2780 return NULL; 2781 2782 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type); 2783 } 2784 2785 static struct address_space * 2786 default_thread_address_space (struct target_ops *self, ptid_t ptid) 2787 { 2788 struct inferior *inf; 2789 2790 /* Fall-back to the "main" address space of the inferior. 
*/ 2791 inf = find_inferior_ptid (ptid); 2792 2793 if (inf == NULL || inf->aspace == NULL) 2794 internal_error (__FILE__, __LINE__, 2795 _("Can't determine the current " 2796 "address space of thread %s\n"), 2797 target_pid_to_str (ptid)); 2798 2799 return inf->aspace; 2800 } 2801 2802 /* Determine the current address space of thread PTID. */ 2803 2804 struct address_space * 2805 target_thread_address_space (ptid_t ptid) 2806 { 2807 struct address_space *aspace; 2808 2809 aspace = current_target.to_thread_address_space (¤t_target, ptid); 2810 gdb_assert (aspace != NULL); 2811 2812 return aspace; 2813 } 2814 2815 2816 /* Target file operations. */ 2817 2818 static struct target_ops * 2819 default_fileio_target (void) 2820 { 2821 /* If we're already connected to something that can perform 2822 file I/O, use it. Otherwise, try using the native target. */ 2823 if (current_target.to_stratum >= process_stratum) 2824 return current_target.beneath; 2825 else 2826 return find_default_run_target ("file I/O"); 2827 } 2828 2829 /* File handle for target file operations. */ 2830 2831 typedef struct 2832 { 2833 /* The target on which this file is open. */ 2834 struct target_ops *t; 2835 2836 /* The file descriptor on the target. */ 2837 int fd; 2838 } fileio_fh_t; 2839 2840 DEF_VEC_O (fileio_fh_t); 2841 2842 /* Vector of currently open file handles. The value returned by 2843 target_fileio_open and passed as the FD argument to other 2844 target_fileio_* functions is an index into this vector. This 2845 vector's entries are never freed; instead, files are marked as 2846 closed, and the handle becomes available for reuse. */ 2847 static VEC (fileio_fh_t) *fileio_fhandles; 2848 2849 /* Macro to check whether a fileio_fh_t represents a closed file. */ 2850 #define is_closed_fileio_fh(fd) ((fd) < 0) 2851 2852 /* Index into fileio_fhandles of the lowest handle that might be 2853 closed. This permits handle reuse without searching the whole 2854 list each time a new file is opened. 
*/ 2855 static int lowest_closed_fd; 2856 2857 /* Acquire a target fileio file descriptor. */ 2858 2859 static int 2860 acquire_fileio_fd (struct target_ops *t, int fd) 2861 { 2862 fileio_fh_t *fh; 2863 2864 gdb_assert (!is_closed_fileio_fh (fd)); 2865 2866 /* Search for closed handles to reuse. */ 2867 for (; 2868 VEC_iterate (fileio_fh_t, fileio_fhandles, 2869 lowest_closed_fd, fh); 2870 lowest_closed_fd++) 2871 if (is_closed_fileio_fh (fh->fd)) 2872 break; 2873 2874 /* Push a new handle if no closed handles were found. */ 2875 if (lowest_closed_fd == VEC_length (fileio_fh_t, fileio_fhandles)) 2876 fh = VEC_safe_push (fileio_fh_t, fileio_fhandles, NULL); 2877 2878 /* Fill in the handle. */ 2879 fh->t = t; 2880 fh->fd = fd; 2881 2882 /* Return its index, and start the next lookup at 2883 the next index. */ 2884 return lowest_closed_fd++; 2885 } 2886 2887 /* Release a target fileio file descriptor. */ 2888 2889 static void 2890 release_fileio_fd (int fd, fileio_fh_t *fh) 2891 { 2892 fh->fd = -1; 2893 lowest_closed_fd = std::min (lowest_closed_fd, fd); 2894 } 2895 2896 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */ 2897 2898 #define fileio_fd_to_fh(fd) \ 2899 VEC_index (fileio_fh_t, fileio_fhandles, (fd)) 2900 2901 /* Helper for target_fileio_open and 2902 target_fileio_open_warn_if_slow. */ 2903 2904 static int 2905 target_fileio_open_1 (struct inferior *inf, const char *filename, 2906 int flags, int mode, int warn_if_slow, 2907 int *target_errno) 2908 { 2909 struct target_ops *t; 2910 2911 for (t = default_fileio_target (); t != NULL; t = t->beneath) 2912 { 2913 if (t->to_fileio_open != NULL) 2914 { 2915 int fd = t->to_fileio_open (t, inf, filename, flags, mode, 2916 warn_if_slow, target_errno); 2917 2918 if (fd < 0) 2919 fd = -1; 2920 else 2921 fd = acquire_fileio_fd (t, fd); 2922 2923 if (targetdebug) 2924 fprintf_unfiltered (gdb_stdlog, 2925 "target_fileio_open (%d,%s,0x%x,0%o,%d)" 2926 " = %d (%d)\n", 2927 inf == NULL ? 
0 : inf->num, 2928 filename, flags, mode, 2929 warn_if_slow, fd, 2930 fd != -1 ? 0 : *target_errno); 2931 return fd; 2932 } 2933 } 2934 2935 *target_errno = FILEIO_ENOSYS; 2936 return -1; 2937 } 2938 2939 /* See target.h. */ 2940 2941 int 2942 target_fileio_open (struct inferior *inf, const char *filename, 2943 int flags, int mode, int *target_errno) 2944 { 2945 return target_fileio_open_1 (inf, filename, flags, mode, 0, 2946 target_errno); 2947 } 2948 2949 /* See target.h. */ 2950 2951 int 2952 target_fileio_open_warn_if_slow (struct inferior *inf, 2953 const char *filename, 2954 int flags, int mode, int *target_errno) 2955 { 2956 return target_fileio_open_1 (inf, filename, flags, mode, 1, 2957 target_errno); 2958 } 2959 2960 /* See target.h. */ 2961 2962 int 2963 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len, 2964 ULONGEST offset, int *target_errno) 2965 { 2966 fileio_fh_t *fh = fileio_fd_to_fh (fd); 2967 int ret = -1; 2968 2969 if (is_closed_fileio_fh (fh->fd)) 2970 *target_errno = EBADF; 2971 else 2972 ret = fh->t->to_fileio_pwrite (fh->t, fh->fd, write_buf, 2973 len, offset, target_errno); 2974 2975 if (targetdebug) 2976 fprintf_unfiltered (gdb_stdlog, 2977 "target_fileio_pwrite (%d,...,%d,%s) " 2978 "= %d (%d)\n", 2979 fd, len, pulongest (offset), 2980 ret, ret != -1 ? 0 : *target_errno); 2981 return ret; 2982 } 2983 2984 /* See target.h. */ 2985 2986 int 2987 target_fileio_pread (int fd, gdb_byte *read_buf, int len, 2988 ULONGEST offset, int *target_errno) 2989 { 2990 fileio_fh_t *fh = fileio_fd_to_fh (fd); 2991 int ret = -1; 2992 2993 if (is_closed_fileio_fh (fh->fd)) 2994 *target_errno = EBADF; 2995 else 2996 ret = fh->t->to_fileio_pread (fh->t, fh->fd, read_buf, 2997 len, offset, target_errno); 2998 2999 if (targetdebug) 3000 fprintf_unfiltered (gdb_stdlog, 3001 "target_fileio_pread (%d,...,%d,%s) " 3002 "= %d (%d)\n", 3003 fd, len, pulongest (offset), 3004 ret, ret != -1 ? 
0 : *target_errno); 3005 return ret; 3006 } 3007 3008 /* See target.h. */ 3009 3010 int 3011 target_fileio_fstat (int fd, struct stat *sb, int *target_errno) 3012 { 3013 fileio_fh_t *fh = fileio_fd_to_fh (fd); 3014 int ret = -1; 3015 3016 if (is_closed_fileio_fh (fh->fd)) 3017 *target_errno = EBADF; 3018 else 3019 ret = fh->t->to_fileio_fstat (fh->t, fh->fd, sb, target_errno); 3020 3021 if (targetdebug) 3022 fprintf_unfiltered (gdb_stdlog, 3023 "target_fileio_fstat (%d) = %d (%d)\n", 3024 fd, ret, ret != -1 ? 0 : *target_errno); 3025 return ret; 3026 } 3027 3028 /* See target.h. */ 3029 3030 int 3031 target_fileio_close (int fd, int *target_errno) 3032 { 3033 fileio_fh_t *fh = fileio_fd_to_fh (fd); 3034 int ret = -1; 3035 3036 if (is_closed_fileio_fh (fh->fd)) 3037 *target_errno = EBADF; 3038 else 3039 { 3040 ret = fh->t->to_fileio_close (fh->t, fh->fd, target_errno); 3041 release_fileio_fd (fd, fh); 3042 } 3043 3044 if (targetdebug) 3045 fprintf_unfiltered (gdb_stdlog, 3046 "target_fileio_close (%d) = %d (%d)\n", 3047 fd, ret, ret != -1 ? 0 : *target_errno); 3048 return ret; 3049 } 3050 3051 /* See target.h. */ 3052 3053 int 3054 target_fileio_unlink (struct inferior *inf, const char *filename, 3055 int *target_errno) 3056 { 3057 struct target_ops *t; 3058 3059 for (t = default_fileio_target (); t != NULL; t = t->beneath) 3060 { 3061 if (t->to_fileio_unlink != NULL) 3062 { 3063 int ret = t->to_fileio_unlink (t, inf, filename, 3064 target_errno); 3065 3066 if (targetdebug) 3067 fprintf_unfiltered (gdb_stdlog, 3068 "target_fileio_unlink (%d,%s)" 3069 " = %d (%d)\n", 3070 inf == NULL ? 0 : inf->num, filename, 3071 ret, ret != -1 ? 0 : *target_errno); 3072 return ret; 3073 } 3074 } 3075 3076 *target_errno = FILEIO_ENOSYS; 3077 return -1; 3078 } 3079 3080 /* See target.h. 
*/ 3081 3082 char * 3083 target_fileio_readlink (struct inferior *inf, const char *filename, 3084 int *target_errno) 3085 { 3086 struct target_ops *t; 3087 3088 for (t = default_fileio_target (); t != NULL; t = t->beneath) 3089 { 3090 if (t->to_fileio_readlink != NULL) 3091 { 3092 char *ret = t->to_fileio_readlink (t, inf, filename, 3093 target_errno); 3094 3095 if (targetdebug) 3096 fprintf_unfiltered (gdb_stdlog, 3097 "target_fileio_readlink (%d,%s)" 3098 " = %s (%d)\n", 3099 inf == NULL ? 0 : inf->num, 3100 filename, ret? ret : "(nil)", 3101 ret? 0 : *target_errno); 3102 return ret; 3103 } 3104 } 3105 3106 *target_errno = FILEIO_ENOSYS; 3107 return NULL; 3108 } 3109 3110 static void 3111 target_fileio_close_cleanup (void *opaque) 3112 { 3113 int fd = *(int *) opaque; 3114 int target_errno; 3115 3116 target_fileio_close (fd, &target_errno); 3117 } 3118 3119 /* Read target file FILENAME, in the filesystem as seen by INF. If 3120 INF is NULL, use the filesystem seen by the debugger (GDB or, for 3121 remote targets, the remote stub). Store the result in *BUF_P and 3122 return the size of the transferred data. PADDING additional bytes 3123 are available in *BUF_P. This is a helper function for 3124 target_fileio_read_alloc; see the declaration of that function for 3125 more information. */ 3126 3127 static LONGEST 3128 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename, 3129 gdb_byte **buf_p, int padding) 3130 { 3131 struct cleanup *close_cleanup; 3132 size_t buf_alloc, buf_pos; 3133 gdb_byte *buf; 3134 LONGEST n; 3135 int fd; 3136 int target_errno; 3137 3138 fd = target_fileio_open (inf, filename, FILEIO_O_RDONLY, 0700, 3139 &target_errno); 3140 if (fd == -1) 3141 return -1; 3142 3143 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd); 3144 3145 /* Start by reading up to 4K at a time. The target will throttle 3146 this number down if necessary. 
*/ 3147 buf_alloc = 4096; 3148 buf = (gdb_byte *) xmalloc (buf_alloc); 3149 buf_pos = 0; 3150 while (1) 3151 { 3152 n = target_fileio_pread (fd, &buf[buf_pos], 3153 buf_alloc - buf_pos - padding, buf_pos, 3154 &target_errno); 3155 if (n < 0) 3156 { 3157 /* An error occurred. */ 3158 do_cleanups (close_cleanup); 3159 xfree (buf); 3160 return -1; 3161 } 3162 else if (n == 0) 3163 { 3164 /* Read all there was. */ 3165 do_cleanups (close_cleanup); 3166 if (buf_pos == 0) 3167 xfree (buf); 3168 else 3169 *buf_p = buf; 3170 return buf_pos; 3171 } 3172 3173 buf_pos += n; 3174 3175 /* If the buffer is filling up, expand it. */ 3176 if (buf_alloc < buf_pos * 2) 3177 { 3178 buf_alloc *= 2; 3179 buf = (gdb_byte *) xrealloc (buf, buf_alloc); 3180 } 3181 3182 QUIT; 3183 } 3184 } 3185 3186 /* See target.h. */ 3187 3188 LONGEST 3189 target_fileio_read_alloc (struct inferior *inf, const char *filename, 3190 gdb_byte **buf_p) 3191 { 3192 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0); 3193 } 3194 3195 /* See target.h. */ 3196 3197 char * 3198 target_fileio_read_stralloc (struct inferior *inf, const char *filename) 3199 { 3200 gdb_byte *buffer; 3201 char *bufstr; 3202 LONGEST i, transferred; 3203 3204 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1); 3205 bufstr = (char *) buffer; 3206 3207 if (transferred < 0) 3208 return NULL; 3209 3210 if (transferred == 0) 3211 return xstrdup (""); 3212 3213 bufstr[transferred] = 0; 3214 3215 /* Check for embedded NUL bytes; but allow trailing NULs. 
*/ 3216 for (i = strlen (bufstr); i < transferred; i++) 3217 if (bufstr[i] != 0) 3218 { 3219 warning (_("target file %s " 3220 "contained unexpected null characters"), 3221 filename); 3222 break; 3223 } 3224 3225 return bufstr; 3226 } 3227 3228 3229 static int 3230 default_region_ok_for_hw_watchpoint (struct target_ops *self, 3231 CORE_ADDR addr, int len) 3232 { 3233 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT); 3234 } 3235 3236 static int 3237 default_watchpoint_addr_within_range (struct target_ops *target, 3238 CORE_ADDR addr, 3239 CORE_ADDR start, int length) 3240 { 3241 return addr >= start && addr < start + length; 3242 } 3243 3244 static struct gdbarch * 3245 default_thread_architecture (struct target_ops *ops, ptid_t ptid) 3246 { 3247 return target_gdbarch (); 3248 } 3249 3250 static int 3251 return_zero (struct target_ops *ignore) 3252 { 3253 return 0; 3254 } 3255 3256 static int 3257 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2) 3258 { 3259 return 0; 3260 } 3261 3262 /* 3263 * Find the next target down the stack from the specified target. 3264 */ 3265 3266 struct target_ops * 3267 find_target_beneath (struct target_ops *t) 3268 { 3269 return t->beneath; 3270 } 3271 3272 /* See target.h. 
*/ 3273 3274 struct target_ops * 3275 find_target_at (enum strata stratum) 3276 { 3277 struct target_ops *t; 3278 3279 for (t = current_target.beneath; t != NULL; t = t->beneath) 3280 if (t->to_stratum == stratum) 3281 return t; 3282 3283 return NULL; 3284 } 3285 3286 3287 3288 /* See target.h */ 3289 3290 void 3291 target_announce_detach (int from_tty) 3292 { 3293 pid_t pid; 3294 const char *exec_file; 3295 3296 if (!from_tty) 3297 return; 3298 3299 exec_file = get_exec_file (0); 3300 if (exec_file == NULL) 3301 exec_file = ""; 3302 3303 pid = ptid_get_pid (inferior_ptid); 3304 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file, 3305 target_pid_to_str (pid_to_ptid (pid))); 3306 gdb_flush (gdb_stdout); 3307 } 3308 3309 /* The inferior process has died. Long live the inferior! */ 3310 3311 void 3312 generic_mourn_inferior (void) 3313 { 3314 ptid_t ptid; 3315 3316 ptid = inferior_ptid; 3317 inferior_ptid = null_ptid; 3318 3319 /* Mark breakpoints uninserted in case something tries to delete a 3320 breakpoint while we delete the inferior's threads (which would 3321 fail, since the inferior is long gone). */ 3322 mark_breakpoints_out (); 3323 3324 if (!ptid_equal (ptid, null_ptid)) 3325 { 3326 int pid = ptid_get_pid (ptid); 3327 exit_inferior (pid); 3328 } 3329 3330 /* Note this wipes step-resume breakpoints, so needs to be done 3331 after exit_inferior, which ends up referencing the step-resume 3332 breakpoints through clear_thread_inferior_resources. */ 3333 breakpoint_init_inferior (inf_exited); 3334 3335 registers_changed (); 3336 3337 reopen_exec_file (); 3338 reinit_frame_cache (); 3339 3340 if (deprecated_detach_hook) 3341 deprecated_detach_hook (); 3342 } 3343 3344 /* Convert a normal process ID to a string. Returns the string in a 3345 static buffer. 
*/ 3346 3347 const char * 3348 normal_pid_to_str (ptid_t ptid) 3349 { 3350 static char buf[32]; 3351 3352 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid)); 3353 return buf; 3354 } 3355 3356 static const char * 3357 default_pid_to_str (struct target_ops *ops, ptid_t ptid) 3358 { 3359 return normal_pid_to_str (ptid); 3360 } 3361 3362 /* Error-catcher for target_find_memory_regions. */ 3363 static int 3364 dummy_find_memory_regions (struct target_ops *self, 3365 find_memory_region_ftype ignore1, void *ignore2) 3366 { 3367 error (_("Command not implemented for this target.")); 3368 return 0; 3369 } 3370 3371 /* Error-catcher for target_make_corefile_notes. */ 3372 static char * 3373 dummy_make_corefile_notes (struct target_ops *self, 3374 bfd *ignore1, int *ignore2) 3375 { 3376 error (_("Command not implemented for this target.")); 3377 return NULL; 3378 } 3379 3380 /* Set up the handful of non-empty slots needed by the dummy target 3381 vector. */ 3382 3383 static void 3384 init_dummy_target (void) 3385 { 3386 dummy_target.to_shortname = "None"; 3387 dummy_target.to_longname = "None"; 3388 dummy_target.to_doc = ""; 3389 dummy_target.to_supports_disable_randomization 3390 = find_default_supports_disable_randomization; 3391 dummy_target.to_stratum = dummy_stratum; 3392 dummy_target.to_has_all_memory = return_zero; 3393 dummy_target.to_has_memory = return_zero; 3394 dummy_target.to_has_stack = return_zero; 3395 dummy_target.to_has_registers = return_zero; 3396 dummy_target.to_has_execution = return_zero_has_execution; 3397 dummy_target.to_magic = OPS_MAGIC; 3398 3399 install_dummy_methods (&dummy_target); 3400 } 3401 3402 3403 void 3404 target_close (struct target_ops *targ) 3405 { 3406 gdb_assert (!target_is_pushed (targ)); 3407 3408 if (targ->to_xclose != NULL) 3409 targ->to_xclose (targ); 3410 else if (targ->to_close != NULL) 3411 targ->to_close (targ); 3412 3413 if (targetdebug) 3414 fprintf_unfiltered (gdb_stdlog, "target_close ()\n"); 3415 } 3416 3417 
int 3418 target_thread_alive (ptid_t ptid) 3419 { 3420 return current_target.to_thread_alive (¤t_target, ptid); 3421 } 3422 3423 void 3424 target_update_thread_list (void) 3425 { 3426 current_target.to_update_thread_list (¤t_target); 3427 } 3428 3429 void 3430 target_stop (ptid_t ptid) 3431 { 3432 if (!may_stop) 3433 { 3434 warning (_("May not interrupt or stop the target, ignoring attempt")); 3435 return; 3436 } 3437 3438 (*current_target.to_stop) (¤t_target, ptid); 3439 } 3440 3441 void 3442 target_interrupt (ptid_t ptid) 3443 { 3444 if (!may_stop) 3445 { 3446 warning (_("May not interrupt or stop the target, ignoring attempt")); 3447 return; 3448 } 3449 3450 (*current_target.to_interrupt) (¤t_target, ptid); 3451 } 3452 3453 /* See target.h. */ 3454 3455 void 3456 target_pass_ctrlc (void) 3457 { 3458 (*current_target.to_pass_ctrlc) (¤t_target); 3459 } 3460 3461 /* See target.h. */ 3462 3463 void 3464 default_target_pass_ctrlc (struct target_ops *ops) 3465 { 3466 target_interrupt (inferior_ptid); 3467 } 3468 3469 /* See target/target.h. */ 3470 3471 void 3472 target_stop_and_wait (ptid_t ptid) 3473 { 3474 struct target_waitstatus status; 3475 int was_non_stop = non_stop; 3476 3477 non_stop = 1; 3478 target_stop (ptid); 3479 3480 memset (&status, 0, sizeof (status)); 3481 target_wait (ptid, &status, 0); 3482 3483 non_stop = was_non_stop; 3484 } 3485 3486 /* See target/target.h. */ 3487 3488 void 3489 target_continue_no_signal (ptid_t ptid) 3490 { 3491 target_resume (ptid, 0, GDB_SIGNAL_0); 3492 } 3493 3494 /* See target/target.h. */ 3495 3496 void 3497 target_continue (ptid_t ptid, enum gdb_signal signal) 3498 { 3499 target_resume (ptid, 0, signal); 3500 } 3501 3502 /* Concatenate ELEM to LIST, a comma separate list, and return the 3503 result. The LIST incoming argument is released. 
*/ 3504 3505 static char * 3506 str_comma_list_concat_elem (char *list, const char *elem) 3507 { 3508 if (list == NULL) 3509 return xstrdup (elem); 3510 else 3511 return reconcat (list, list, ", ", elem, (char *) NULL); 3512 } 3513 3514 /* Helper for target_options_to_string. If OPT is present in 3515 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET. 3516 Returns the new resulting string. OPT is removed from 3517 TARGET_OPTIONS. */ 3518 3519 static char * 3520 do_option (int *target_options, char *ret, 3521 int opt, const char *opt_str) 3522 { 3523 if ((*target_options & opt) != 0) 3524 { 3525 ret = str_comma_list_concat_elem (ret, opt_str); 3526 *target_options &= ~opt; 3527 } 3528 3529 return ret; 3530 } 3531 3532 char * 3533 target_options_to_string (int target_options) 3534 { 3535 char *ret = NULL; 3536 3537 #define DO_TARG_OPTION(OPT) \ 3538 ret = do_option (&target_options, ret, OPT, #OPT) 3539 3540 DO_TARG_OPTION (TARGET_WNOHANG); 3541 3542 if (target_options != 0) 3543 ret = str_comma_list_concat_elem (ret, "unknown???"); 3544 3545 if (ret == NULL) 3546 ret = xstrdup (""); 3547 return ret; 3548 } 3549 3550 void 3551 target_fetch_registers (struct regcache *regcache, int regno) 3552 { 3553 current_target.to_fetch_registers (¤t_target, regcache, regno); 3554 if (targetdebug) 3555 regcache_debug_print_register ("target_fetch_registers", regcache, regno); 3556 } 3557 3558 void 3559 target_store_registers (struct regcache *regcache, int regno) 3560 { 3561 if (!may_write_registers) 3562 error (_("Writing to registers is not allowed (regno %d)"), regno); 3563 3564 current_target.to_store_registers (¤t_target, regcache, regno); 3565 if (targetdebug) 3566 { 3567 regcache_debug_print_register ("target_store_registers", regcache, 3568 regno); 3569 } 3570 } 3571 3572 int 3573 target_core_of_thread (ptid_t ptid) 3574 { 3575 return current_target.to_core_of_thread (¤t_target, ptid); 3576 } 3577 3578 int 3579 simple_verify_memory (struct target_ops *ops, 
3580 const gdb_byte *data, CORE_ADDR lma, ULONGEST size) 3581 { 3582 LONGEST total_xfered = 0; 3583 3584 while (total_xfered < size) 3585 { 3586 ULONGEST xfered_len; 3587 enum target_xfer_status status; 3588 gdb_byte buf[1024]; 3589 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered); 3590 3591 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL, 3592 buf, NULL, lma + total_xfered, howmuch, 3593 &xfered_len); 3594 if (status == TARGET_XFER_OK 3595 && memcmp (data + total_xfered, buf, xfered_len) == 0) 3596 { 3597 total_xfered += xfered_len; 3598 QUIT; 3599 } 3600 else 3601 return 0; 3602 } 3603 return 1; 3604 } 3605 3606 /* Default implementation of memory verification. */ 3607 3608 static int 3609 default_verify_memory (struct target_ops *self, 3610 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size) 3611 { 3612 /* Start over from the top of the target stack. */ 3613 return simple_verify_memory (current_target.beneath, 3614 data, memaddr, size); 3615 } 3616 3617 int 3618 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size) 3619 { 3620 return current_target.to_verify_memory (¤t_target, 3621 data, memaddr, size); 3622 } 3623 3624 /* The documentation for this function is in its prototype declaration in 3625 target.h. */ 3626 3627 int 3628 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, 3629 enum target_hw_bp_type rw) 3630 { 3631 return current_target.to_insert_mask_watchpoint (¤t_target, 3632 addr, mask, rw); 3633 } 3634 3635 /* The documentation for this function is in its prototype declaration in 3636 target.h. */ 3637 3638 int 3639 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, 3640 enum target_hw_bp_type rw) 3641 { 3642 return current_target.to_remove_mask_watchpoint (¤t_target, 3643 addr, mask, rw); 3644 } 3645 3646 /* The documentation for this function is in its prototype declaration 3647 in target.h. 
*/ 3648 3649 int 3650 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask) 3651 { 3652 return current_target.to_masked_watch_num_registers (¤t_target, 3653 addr, mask); 3654 } 3655 3656 /* The documentation for this function is in its prototype declaration 3657 in target.h. */ 3658 3659 int 3660 target_ranged_break_num_registers (void) 3661 { 3662 return current_target.to_ranged_break_num_registers (¤t_target); 3663 } 3664 3665 /* See target.h. */ 3666 3667 int 3668 target_supports_btrace (enum btrace_format format) 3669 { 3670 return current_target.to_supports_btrace (¤t_target, format); 3671 } 3672 3673 /* See target.h. */ 3674 3675 struct btrace_target_info * 3676 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf) 3677 { 3678 return current_target.to_enable_btrace (¤t_target, ptid, conf); 3679 } 3680 3681 /* See target.h. */ 3682 3683 void 3684 target_disable_btrace (struct btrace_target_info *btinfo) 3685 { 3686 current_target.to_disable_btrace (¤t_target, btinfo); 3687 } 3688 3689 /* See target.h. */ 3690 3691 void 3692 target_teardown_btrace (struct btrace_target_info *btinfo) 3693 { 3694 current_target.to_teardown_btrace (¤t_target, btinfo); 3695 } 3696 3697 /* See target.h. */ 3698 3699 enum btrace_error 3700 target_read_btrace (struct btrace_data *btrace, 3701 struct btrace_target_info *btinfo, 3702 enum btrace_read_type type) 3703 { 3704 return current_target.to_read_btrace (¤t_target, btrace, btinfo, type); 3705 } 3706 3707 /* See target.h. */ 3708 3709 const struct btrace_config * 3710 target_btrace_conf (const struct btrace_target_info *btinfo) 3711 { 3712 return current_target.to_btrace_conf (¤t_target, btinfo); 3713 } 3714 3715 /* See target.h. */ 3716 3717 void 3718 target_stop_recording (void) 3719 { 3720 current_target.to_stop_recording (¤t_target); 3721 } 3722 3723 /* See target.h. 
*/ 3724 3725 void 3726 target_save_record (const char *filename) 3727 { 3728 current_target.to_save_record (¤t_target, filename); 3729 } 3730 3731 /* See target.h. */ 3732 3733 int 3734 target_supports_delete_record (void) 3735 { 3736 struct target_ops *t; 3737 3738 for (t = current_target.beneath; t != NULL; t = t->beneath) 3739 if (t->to_delete_record != delegate_delete_record 3740 && t->to_delete_record != tdefault_delete_record) 3741 return 1; 3742 3743 return 0; 3744 } 3745 3746 /* See target.h. */ 3747 3748 void 3749 target_delete_record (void) 3750 { 3751 current_target.to_delete_record (¤t_target); 3752 } 3753 3754 /* See target.h. */ 3755 3756 enum record_method 3757 target_record_method (ptid_t ptid) 3758 { 3759 return current_target.to_record_method (¤t_target, ptid); 3760 } 3761 3762 /* See target.h. */ 3763 3764 int 3765 target_record_is_replaying (ptid_t ptid) 3766 { 3767 return current_target.to_record_is_replaying (¤t_target, ptid); 3768 } 3769 3770 /* See target.h. */ 3771 3772 int 3773 target_record_will_replay (ptid_t ptid, int dir) 3774 { 3775 return current_target.to_record_will_replay (¤t_target, ptid, dir); 3776 } 3777 3778 /* See target.h. */ 3779 3780 void 3781 target_record_stop_replaying (void) 3782 { 3783 current_target.to_record_stop_replaying (¤t_target); 3784 } 3785 3786 /* See target.h. */ 3787 3788 void 3789 target_goto_record_begin (void) 3790 { 3791 current_target.to_goto_record_begin (¤t_target); 3792 } 3793 3794 /* See target.h. */ 3795 3796 void 3797 target_goto_record_end (void) 3798 { 3799 current_target.to_goto_record_end (¤t_target); 3800 } 3801 3802 /* See target.h. */ 3803 3804 void 3805 target_goto_record (ULONGEST insn) 3806 { 3807 current_target.to_goto_record (¤t_target, insn); 3808 } 3809 3810 /* See target.h. */ 3811 3812 void 3813 target_insn_history (int size, int flags) 3814 { 3815 current_target.to_insn_history (¤t_target, size, flags); 3816 } 3817 3818 /* See target.h. 
*/ 3819 3820 void 3821 target_insn_history_from (ULONGEST from, int size, int flags) 3822 { 3823 current_target.to_insn_history_from (¤t_target, from, size, flags); 3824 } 3825 3826 /* See target.h. */ 3827 3828 void 3829 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags) 3830 { 3831 current_target.to_insn_history_range (¤t_target, begin, end, flags); 3832 } 3833 3834 /* See target.h. */ 3835 3836 void 3837 target_call_history (int size, int flags) 3838 { 3839 current_target.to_call_history (¤t_target, size, flags); 3840 } 3841 3842 /* See target.h. */ 3843 3844 void 3845 target_call_history_from (ULONGEST begin, int size, int flags) 3846 { 3847 current_target.to_call_history_from (¤t_target, begin, size, flags); 3848 } 3849 3850 /* See target.h. */ 3851 3852 void 3853 target_call_history_range (ULONGEST begin, ULONGEST end, int flags) 3854 { 3855 current_target.to_call_history_range (¤t_target, begin, end, flags); 3856 } 3857 3858 /* See target.h. */ 3859 3860 const struct frame_unwind * 3861 target_get_unwinder (void) 3862 { 3863 return current_target.to_get_unwinder (¤t_target); 3864 } 3865 3866 /* See target.h. */ 3867 3868 const struct frame_unwind * 3869 target_get_tailcall_unwinder (void) 3870 { 3871 return current_target.to_get_tailcall_unwinder (¤t_target); 3872 } 3873 3874 /* See target.h. */ 3875 3876 void 3877 target_prepare_to_generate_core (void) 3878 { 3879 current_target.to_prepare_to_generate_core (¤t_target); 3880 } 3881 3882 /* See target.h. 
*/ 3883 3884 void 3885 target_done_generating_core (void) 3886 { 3887 current_target.to_done_generating_core (¤t_target); 3888 } 3889 3890 static void 3891 setup_target_debug (void) 3892 { 3893 memcpy (&debug_target, ¤t_target, sizeof debug_target); 3894 3895 init_debug_target (¤t_target); 3896 } 3897 3898 3899 static char targ_desc[] = 3900 "Names of targets and files being debugged.\nShows the entire \ 3901 stack of targets currently in use (including the exec-file,\n\ 3902 core-file, and process, if any), as well as the symbol file name."; 3903 3904 static void 3905 default_rcmd (struct target_ops *self, const char *command, 3906 struct ui_file *output) 3907 { 3908 error (_("\"monitor\" command not supported by this target.")); 3909 } 3910 3911 static void 3912 do_monitor_command (char *cmd, 3913 int from_tty) 3914 { 3915 target_rcmd (cmd, gdb_stdtarg); 3916 } 3917 3918 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are 3919 ignored. */ 3920 3921 void 3922 flash_erase_command (char *cmd, int from_tty) 3923 { 3924 /* Used to communicate termination of flash operations to the target. */ 3925 bool found_flash_region = false; 3926 struct mem_region *m; 3927 struct gdbarch *gdbarch = target_gdbarch (); 3928 3929 VEC(mem_region_s) *mem_regions = target_memory_map (); 3930 3931 /* Iterate over all memory regions. */ 3932 for (int i = 0; VEC_iterate (mem_region_s, mem_regions, i, m); i++) 3933 { 3934 /* Fetch the memory attribute. */ 3935 struct mem_attrib *attrib = &m->attrib; 3936 3937 /* Is this a flash memory region? 
*/ 3938 if (attrib->mode == MEM_FLASH) 3939 { 3940 found_flash_region = true; 3941 target_flash_erase (m->lo, m->hi - m->lo); 3942 3943 struct cleanup *cleanup_tuple 3944 = make_cleanup_ui_out_tuple_begin_end (current_uiout, 3945 "erased-regions"); 3946 3947 current_uiout->message (_("Erasing flash memory region at address ")); 3948 current_uiout->field_fmt ("address", "%s", paddress (gdbarch, 3949 m->lo)); 3950 current_uiout->message (", size = "); 3951 current_uiout->field_fmt ("size", "%s", hex_string (m->hi - m->lo)); 3952 current_uiout->message ("\n"); 3953 do_cleanups (cleanup_tuple); 3954 } 3955 } 3956 3957 /* Did we do any flash operations? If so, we need to finalize them. */ 3958 if (found_flash_region) 3959 target_flash_done (); 3960 else 3961 current_uiout->message (_("No flash memory regions found.\n")); 3962 } 3963 3964 /* Print the name of each layers of our target stack. */ 3965 3966 static void 3967 maintenance_print_target_stack (char *cmd, int from_tty) 3968 { 3969 struct target_ops *t; 3970 3971 printf_filtered (_("The current target stack is:\n")); 3972 3973 for (t = target_stack; t != NULL; t = t->beneath) 3974 { 3975 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname); 3976 } 3977 } 3978 3979 /* See target.h. */ 3980 3981 void 3982 target_async (int enable) 3983 { 3984 infrun_async (enable); 3985 current_target.to_async (¤t_target, enable); 3986 } 3987 3988 /* See target.h. */ 3989 3990 void 3991 target_thread_events (int enable) 3992 { 3993 current_target.to_thread_events (¤t_target, enable); 3994 } 3995 3996 /* Controls if targets can report that they can/are async. This is 3997 just for maintainers to use when debugging gdb. */ 3998 int target_async_permitted = 1; 3999 4000 /* The set command writes to this variable. If the inferior is 4001 executing, target_async_permitted is *not* updated. 
*/ 4002 static int target_async_permitted_1 = 1; 4003 4004 static void 4005 maint_set_target_async_command (char *args, int from_tty, 4006 struct cmd_list_element *c) 4007 { 4008 if (have_live_inferiors ()) 4009 { 4010 target_async_permitted_1 = target_async_permitted; 4011 error (_("Cannot change this setting while the inferior is running.")); 4012 } 4013 4014 target_async_permitted = target_async_permitted_1; 4015 } 4016 4017 static void 4018 maint_show_target_async_command (struct ui_file *file, int from_tty, 4019 struct cmd_list_element *c, 4020 const char *value) 4021 { 4022 fprintf_filtered (file, 4023 _("Controlling the inferior in " 4024 "asynchronous mode is %s.\n"), value); 4025 } 4026 4027 /* Return true if the target operates in non-stop mode even with "set 4028 non-stop off". */ 4029 4030 static int 4031 target_always_non_stop_p (void) 4032 { 4033 return current_target.to_always_non_stop_p (¤t_target); 4034 } 4035 4036 /* See target.h. */ 4037 4038 int 4039 target_is_non_stop_p (void) 4040 { 4041 return (non_stop 4042 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE 4043 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO 4044 && target_always_non_stop_p ())); 4045 } 4046 4047 /* Controls if targets can report that they always run in non-stop 4048 mode. This is just for maintainers to use when debugging gdb. */ 4049 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO; 4050 4051 /* The set command writes to this variable. If the inferior is 4052 executing, target_non_stop_enabled is *not* updated. */ 4053 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO; 4054 4055 /* Implementation of "maint set target-non-stop". 
*/ 4056 4057 static void 4058 maint_set_target_non_stop_command (char *args, int from_tty, 4059 struct cmd_list_element *c) 4060 { 4061 if (have_live_inferiors ()) 4062 { 4063 target_non_stop_enabled_1 = target_non_stop_enabled; 4064 error (_("Cannot change this setting while the inferior is running.")); 4065 } 4066 4067 target_non_stop_enabled = target_non_stop_enabled_1; 4068 } 4069 4070 /* Implementation of "maint show target-non-stop". */ 4071 4072 static void 4073 maint_show_target_non_stop_command (struct ui_file *file, int from_tty, 4074 struct cmd_list_element *c, 4075 const char *value) 4076 { 4077 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO) 4078 fprintf_filtered (file, 4079 _("Whether the target is always in non-stop mode " 4080 "is %s (currently %s).\n"), value, 4081 target_always_non_stop_p () ? "on" : "off"); 4082 else 4083 fprintf_filtered (file, 4084 _("Whether the target is always in non-stop mode " 4085 "is %s.\n"), value); 4086 } 4087 4088 /* Temporary copies of permission settings. */ 4089 4090 static int may_write_registers_1 = 1; 4091 static int may_write_memory_1 = 1; 4092 static int may_insert_breakpoints_1 = 1; 4093 static int may_insert_tracepoints_1 = 1; 4094 static int may_insert_fast_tracepoints_1 = 1; 4095 static int may_stop_1 = 1; 4096 4097 /* Make the user-set values match the real values again. */ 4098 4099 void 4100 update_target_permissions (void) 4101 { 4102 may_write_registers_1 = may_write_registers; 4103 may_write_memory_1 = may_write_memory; 4104 may_insert_breakpoints_1 = may_insert_breakpoints; 4105 may_insert_tracepoints_1 = may_insert_tracepoints; 4106 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints; 4107 may_stop_1 = may_stop; 4108 } 4109 4110 /* The one function handles (most of) the permission flags in the same 4111 way. 
*/ 4112 4113 static void 4114 set_target_permissions (char *args, int from_tty, 4115 struct cmd_list_element *c) 4116 { 4117 if (target_has_execution) 4118 { 4119 update_target_permissions (); 4120 error (_("Cannot change this setting while the inferior is running.")); 4121 } 4122 4123 /* Make the real values match the user-changed values. */ 4124 may_write_registers = may_write_registers_1; 4125 may_insert_breakpoints = may_insert_breakpoints_1; 4126 may_insert_tracepoints = may_insert_tracepoints_1; 4127 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1; 4128 may_stop = may_stop_1; 4129 update_observer_mode (); 4130 } 4131 4132 /* Set memory write permission independently of observer mode. */ 4133 4134 static void 4135 set_write_memory_permission (char *args, int from_tty, 4136 struct cmd_list_element *c) 4137 { 4138 /* Make the real values match the user-changed values. */ 4139 may_write_memory = may_write_memory_1; 4140 update_observer_mode (); 4141 } 4142 4143 4144 void 4145 initialize_targets (void) 4146 { 4147 init_dummy_target (); 4148 push_target (&dummy_target); 4149 4150 add_info ("target", target_info, targ_desc); 4151 add_info ("files", target_info, targ_desc); 4152 4153 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\ 4154 Set target debugging."), _("\ 4155 Show target debugging."), _("\ 4156 When non-zero, target debugging is enabled. Higher numbers are more\n\ 4157 verbose."), 4158 set_targetdebug, 4159 show_targetdebug, 4160 &setdebuglist, &showdebuglist); 4161 4162 add_setshow_boolean_cmd ("trust-readonly-sections", class_support, 4163 &trust_readonly, _("\ 4164 Set mode for reading from readonly sections."), _("\ 4165 Show mode for reading from readonly sections."), _("\ 4166 When this mode is on, memory reads from readonly sections (such as .text)\n\ 4167 will be read from the object file instead of from the target. 
This will\n\ 4168 result in significant performance improvement for remote targets."), 4169 NULL, 4170 show_trust_readonly, 4171 &setlist, &showlist); 4172 4173 add_com ("monitor", class_obscure, do_monitor_command, 4174 _("Send a command to the remote monitor (remote targets only).")); 4175 4176 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack, 4177 _("Print the name of each layer of the internal target stack."), 4178 &maintenanceprintlist); 4179 4180 add_setshow_boolean_cmd ("target-async", no_class, 4181 &target_async_permitted_1, _("\ 4182 Set whether gdb controls the inferior in asynchronous mode."), _("\ 4183 Show whether gdb controls the inferior in asynchronous mode."), _("\ 4184 Tells gdb whether to control the inferior in asynchronous mode."), 4185 maint_set_target_async_command, 4186 maint_show_target_async_command, 4187 &maintenance_set_cmdlist, 4188 &maintenance_show_cmdlist); 4189 4190 add_setshow_auto_boolean_cmd ("target-non-stop", no_class, 4191 &target_non_stop_enabled_1, _("\ 4192 Set whether gdb always controls the inferior in non-stop mode."), _("\ 4193 Show whether gdb always controls the inferior in non-stop mode."), _("\ 4194 Tells gdb whether to control the inferior in non-stop mode."), 4195 maint_set_target_non_stop_command, 4196 maint_show_target_non_stop_command, 4197 &maintenance_set_cmdlist, 4198 &maintenance_show_cmdlist); 4199 4200 add_setshow_boolean_cmd ("may-write-registers", class_support, 4201 &may_write_registers_1, _("\ 4202 Set permission to write into registers."), _("\ 4203 Show permission to write into registers."), _("\ 4204 When this permission is on, GDB may write into the target's registers.\n\ 4205 Otherwise, any sort of write attempt will result in an error."), 4206 set_target_permissions, NULL, 4207 &setlist, &showlist); 4208 4209 add_setshow_boolean_cmd ("may-write-memory", class_support, 4210 &may_write_memory_1, _("\ 4211 Set permission to write into target memory."), _("\ 4212 Show 
permission to write into target memory."), _("\ 4213 When this permission is on, GDB may write into the target's memory.\n\ 4214 Otherwise, any sort of write attempt will result in an error."), 4215 set_write_memory_permission, NULL, 4216 &setlist, &showlist); 4217 4218 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support, 4219 &may_insert_breakpoints_1, _("\ 4220 Set permission to insert breakpoints in the target."), _("\ 4221 Show permission to insert breakpoints in the target."), _("\ 4222 When this permission is on, GDB may insert breakpoints in the program.\n\ 4223 Otherwise, any sort of insertion attempt will result in an error."), 4224 set_target_permissions, NULL, 4225 &setlist, &showlist); 4226 4227 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support, 4228 &may_insert_tracepoints_1, _("\ 4229 Set permission to insert tracepoints in the target."), _("\ 4230 Show permission to insert tracepoints in the target."), _("\ 4231 When this permission is on, GDB may insert tracepoints in the program.\n\ 4232 Otherwise, any sort of insertion attempt will result in an error."), 4233 set_target_permissions, NULL, 4234 &setlist, &showlist); 4235 4236 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support, 4237 &may_insert_fast_tracepoints_1, _("\ 4238 Set permission to insert fast tracepoints in the target."), _("\ 4239 Show permission to insert fast tracepoints in the target."), _("\ 4240 When this permission is on, GDB may insert fast tracepoints.\n\ 4241 Otherwise, any sort of insertion attempt will result in an error."), 4242 set_target_permissions, NULL, 4243 &setlist, &showlist); 4244 4245 add_setshow_boolean_cmd ("may-interrupt", class_support, 4246 &may_stop_1, _("\ 4247 Set permission to interrupt or signal the target."), _("\ 4248 Show permission to interrupt or signal the target."), _("\ 4249 When this permission is on, GDB may interrupt/stop the target's execution.\n\ 4250 Otherwise, any attempt to interrupt or stop 
will be ignored."), 4251 set_target_permissions, NULL, 4252 &setlist, &showlist); 4253 4254 add_com ("flash-erase", no_class, flash_erase_command, 4255 _("Erase all flash memory regions.")); 4256 4257 add_setshow_boolean_cmd ("auto-connect-native-target", class_support, 4258 &auto_connect_native_target, _("\ 4259 Set whether GDB may automatically connect to the native target."), _("\ 4260 Show whether GDB may automatically connect to the native target."), _("\ 4261 When on, and GDB is not connected to a target yet, GDB\n\ 4262 attempts \"run\" and other commands with the native target."), 4263 NULL, show_auto_connect_native_target, 4264 &setlist, &showlist); 4265 } 4266