xref: /netbsd-src/external/gpl3/gdb.old/dist/gdb/target.c (revision 8b657b0747480f8989760d71343d6dd33f8d4cf9)
1 /* Select target systems and architectures at runtime for GDB.
2 
3    Copyright (C) 1990-2023 Free Software Foundation, Inc.
4 
5    Contributed by Cygnus Support.
6 
7    This file is part of GDB.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdbsupport/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "top.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57 
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59 
60 static void default_terminal_info (struct target_ops *, const char *, int);
61 
62 static int default_watchpoint_addr_within_range (struct target_ops *,
63 						 CORE_ADDR, CORE_ADDR, int);
64 
65 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
66 						CORE_ADDR, int);
67 
68 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
69 
70 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
71 					 long lwp, ULONGEST tid);
72 
73 static void default_mourn_inferior (struct target_ops *self);
74 
75 static int default_search_memory (struct target_ops *ops,
76 				  CORE_ADDR start_addr,
77 				  ULONGEST search_space_len,
78 				  const gdb_byte *pattern,
79 				  ULONGEST pattern_len,
80 				  CORE_ADDR *found_addrp);
81 
82 static int default_verify_memory (struct target_ops *self,
83 				  const gdb_byte *data,
84 				  CORE_ADDR memaddr, ULONGEST size);
85 
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
87 
88 static struct target_ops *find_default_run_target (const char *);
89 
90 static int dummy_find_memory_regions (struct target_ops *self,
91 				      find_memory_region_ftype ignore1,
92 				      void *ignore2);
93 
94 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
95   (struct target_ops *self, bfd *ignore1, int *ignore2);
96 
97 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
98 
99 static enum exec_direction_kind default_execution_direction
100     (struct target_ops *self);
101 
102 /* Mapping between target_info objects (which have address identity)
103    and corresponding open/factory function/callback.  Each add_target
104    call adds one entry to this map, and registers a "target
105    TARGET_NAME" command that when invoked calls the factory registered
106    here.  The target_info object is associated with the command via
107    the command's context.  */
108 static std::unordered_map<const target_info *, target_open_ftype *>
109   target_factories;
110 
111 /* The singleton debug target.  */
112 
113 static struct target_ops *the_debug_target;
114 
115 /* Command list for target.  */
116 
117 static struct cmd_list_element *targetlist = NULL;
118 
119 /* True if we should trust readonly sections from the
120    executable when reading memory.  */
121 
122 static bool trust_readonly = false;
123 
124 /* Nonzero if we should show true memory content including
125    memory breakpoints inserted by GDB.  */
126 
127 static int show_memory_breakpoints = 0;
128 
129 /* These globals control whether GDB attempts to perform these
130    operations; they are useful for targets that need to prevent
131    inadvertent disruption, such as in non-stop mode.  */
132 
133 bool may_write_registers = true;
134 
135 bool may_write_memory = true;
136 
137 bool may_insert_breakpoints = true;
138 
139 bool may_insert_tracepoints = true;
140 
141 bool may_insert_fast_tracepoints = true;
142 
143 bool may_stop = true;
144 
145 /* Non-zero if we want to see trace of target level stuff.  */
146 /* Non-zero if we want to see a trace of target-level operations.  */
147 static unsigned int targetdebug = 0;
148 
149 static void
150 set_targetdebug  (const char *args, int from_tty, struct cmd_list_element *c)
151 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
152   if (targetdebug)
153     current_inferior ()->push_target (the_debug_target);
154   else
155     current_inferior ()->unpush_target (the_debug_target);
156 }
157 
158 static void
159 show_targetdebug (struct ui_file *file, int from_tty,
160 		  struct cmd_list_element *c, const char *value)
161 {
162   gdb_printf (file, _("Target debugging is %s.\n"), value);
163 }
164 
165 int
166 target_has_memory ()
167 {
168   for (target_ops *t = current_inferior ()->top_target ();
169        t != NULL;
170        t = t->beneath ())
171     if (t->has_memory ())
172       return 1;
173 
174   return 0;
175 }
176 
177 int
178 target_has_stack ()
179 {
180   for (target_ops *t = current_inferior ()->top_target ();
181        t != NULL;
182        t = t->beneath ())
183     if (t->has_stack ())
184       return 1;
185 
186   return 0;
187 }
188 
189 int
190 target_has_registers ()
191 {
192   for (target_ops *t = current_inferior ()->top_target ();
193        t != NULL;
194        t = t->beneath ())
195     if (t->has_registers ())
196       return 1;
197 
198   return 0;
199 }
200 
201 bool
202 target_has_execution (inferior *inf)
203 {
204   if (inf == nullptr)
205     inf = current_inferior ();
206 
207   for (target_ops *t = inf->top_target ();
208        t != nullptr;
209        t = inf->find_target_beneath (t))
210     if (t->has_execution (inf))
211       return true;
212 
213   return false;
214 }
215 
216 const char *
217 target_shortname ()
218 {
219   return current_inferior ()->top_target ()->shortname ();
220 }
221 
222 /* See target.h.  */
223 
224 bool
225 target_attach_no_wait ()
226 {
227   return current_inferior ()->top_target ()->attach_no_wait ();
228 }
229 
230 /* See target.h.  */
231 
232 void
233 target_post_attach (int pid)
234 {
235   return current_inferior ()->top_target ()->post_attach (pid);
236 }
237 
238 /* See target.h.  */
239 
240 void
241 target_prepare_to_store (regcache *regcache)
242 {
243   return current_inferior ()->top_target ()->prepare_to_store (regcache);
244 }
245 
246 /* See target.h.  */
247 
248 bool
249 target_supports_enable_disable_tracepoint ()
250 {
251   target_ops *target = current_inferior ()->top_target ();
252 
253   return target->supports_enable_disable_tracepoint ();
254 }
255 
256 bool
257 target_supports_string_tracing ()
258 {
259   return current_inferior ()->top_target ()->supports_string_tracing ();
260 }
261 
262 /* See target.h.  */
263 
264 bool
265 target_supports_evaluation_of_breakpoint_conditions ()
266 {
267   target_ops *target = current_inferior ()->top_target ();
268 
269   return target->supports_evaluation_of_breakpoint_conditions ();
270 }
271 
272 /* See target.h.  */
273 
274 bool
275 target_supports_dumpcore ()
276 {
277   return current_inferior ()->top_target ()->supports_dumpcore ();
278 }
279 
280 /* See target.h.  */
281 
282 void
283 target_dumpcore (const char *filename)
284 {
285   return current_inferior ()->top_target ()->dumpcore (filename);
286 }
287 
288 /* See target.h.  */
289 
290 bool
291 target_can_run_breakpoint_commands ()
292 {
293   return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
294 }
295 
296 /* See target.h.  */
297 
298 void
299 target_files_info ()
300 {
301   return current_inferior ()->top_target ()->files_info ();
302 }
303 
304 /* See target.h.  */
305 
306 int
307 target_insert_fork_catchpoint (int pid)
308 {
309   return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
310 }
311 
312 /* See target.h.  */
313 
314 int
315 target_remove_fork_catchpoint (int pid)
316 {
317   return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
318 }
319 
320 /* See target.h.  */
321 
322 int
323 target_insert_vfork_catchpoint (int pid)
324 {
325   return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
326 }
327 
328 /* See target.h.  */
329 
330 int
331 target_remove_vfork_catchpoint (int pid)
332 {
333   return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
334 }
335 
336 /* See target.h.  */
337 
338 int
339 target_insert_exec_catchpoint (int pid)
340 {
341   return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
342 }
343 
344 /* See target.h.  */
345 
346 int
347 target_remove_exec_catchpoint (int pid)
348 {
349   return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
350 }
351 
352 /* See target.h.  */
353 
354 int
355 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
356 			       gdb::array_view<const int> syscall_counts)
357 {
358   target_ops *target = current_inferior ()->top_target ();
359 
360   return target->set_syscall_catchpoint (pid, needed, any_count,
361 					 syscall_counts);
362 }
363 
364 /* See target.h.  */
365 
366 void
367 target_rcmd (const char *command, struct ui_file *outbuf)
368 {
369   return current_inferior ()->top_target ()->rcmd (command, outbuf);
370 }
371 
372 /* See target.h.  */
373 
374 bool
375 target_can_lock_scheduler ()
376 {
377   target_ops *target = current_inferior ()->top_target ();
378 
379   return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
380 }
381 
382 /* See target.h.  */
383 
384 bool
385 target_can_async_p ()
386 {
387   return target_can_async_p (current_inferior ()->top_target ());
388 }
389 
390 /* See target.h.  */
391 
392 bool
393 target_can_async_p (struct target_ops *target)
394 {
395   if (!target_async_permitted)
396     return false;
397   return target->can_async_p ();
398 }
399 
400 /* See target.h.  */
401 
402 bool
403 target_is_async_p ()
404 {
405   bool result = current_inferior ()->top_target ()->is_async_p ();
406   gdb_assert (target_async_permitted || !result);
407   return result;
408 }
409 
410 exec_direction_kind
411 target_execution_direction ()
412 {
413   return current_inferior ()->top_target ()->execution_direction ();
414 }
415 
416 /* See target.h.  */
417 
418 const char *
419 target_extra_thread_info (thread_info *tp)
420 {
421   return current_inferior ()->top_target ()->extra_thread_info (tp);
422 }
423 
424 /* See target.h.  */
425 
426 const char *
427 target_pid_to_exec_file (int pid)
428 {
429   return current_inferior ()->top_target ()->pid_to_exec_file (pid);
430 }
431 
432 /* See target.h.  */
433 
434 gdbarch *
435 target_thread_architecture (ptid_t ptid)
436 {
437   return current_inferior ()->top_target ()->thread_architecture (ptid);
438 }
439 
440 /* See target.h.  */
441 
442 int
443 target_find_memory_regions (find_memory_region_ftype func, void *data)
444 {
445   return current_inferior ()->top_target ()->find_memory_regions (func, data);
446 }
447 
448 /* See target.h.  */
449 
450 gdb::unique_xmalloc_ptr<char>
451 target_make_corefile_notes (bfd *bfd, int *size_p)
452 {
453   return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
454 }
455 
456 gdb_byte *
457 target_get_bookmark (const char *args, int from_tty)
458 {
459   return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
460 }
461 
462 void
463 target_goto_bookmark (const gdb_byte *arg, int from_tty)
464 {
465   return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
466 }
467 
468 /* See target.h.  */
469 
470 bool
471 target_stopped_by_watchpoint ()
472 {
473   return current_inferior ()->top_target ()->stopped_by_watchpoint ();
474 }
475 
476 /* See target.h.  */
477 
478 bool
479 target_stopped_by_sw_breakpoint ()
480 {
481   return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
482 }
483 
484 bool
485 target_supports_stopped_by_sw_breakpoint ()
486 {
487   target_ops *target = current_inferior ()->top_target ();
488 
489   return target->supports_stopped_by_sw_breakpoint ();
490 }
491 
492 bool
493 target_stopped_by_hw_breakpoint ()
494 {
495   return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
496 }
497 
498 bool
499 target_supports_stopped_by_hw_breakpoint ()
500 {
501   target_ops *target = current_inferior ()->top_target ();
502 
503   return target->supports_stopped_by_hw_breakpoint ();
504 }
505 
506 /* See target.h.  */
507 
508 bool
509 target_have_steppable_watchpoint ()
510 {
511   return current_inferior ()->top_target ()->have_steppable_watchpoint ();
512 }
513 
514 /* See target.h.  */
515 
516 int
517 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
518 {
519   target_ops *target = current_inferior ()->top_target ();
520 
521   return target->can_use_hw_breakpoint (type, cnt, othertype);
522 }
523 
524 /* See target.h.  */
525 
526 int
527 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
528 {
529   target_ops *target = current_inferior ()->top_target ();
530 
531   return target->region_ok_for_hw_watchpoint (addr, len);
532 }
533 
534 
535 int
536 target_can_do_single_step ()
537 {
538   return current_inferior ()->top_target ()->can_do_single_step ();
539 }
540 
541 /* See target.h.  */
542 
543 int
544 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
545 			  expression *cond)
546 {
547   target_ops *target = current_inferior ()->top_target ();
548 
549   return target->insert_watchpoint (addr, len, type, cond);
550 }
551 
552 /* See target.h.  */
553 
554 int
555 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
556 			  expression *cond)
557 {
558   target_ops *target = current_inferior ()->top_target ();
559 
560   return target->remove_watchpoint (addr, len, type, cond);
561 }
562 
563 /* See target.h.  */
564 
565 int
566 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
567 {
568   target_ops *target = current_inferior ()->top_target ();
569 
570   return target->insert_hw_breakpoint (gdbarch, bp_tgt);
571 }
572 
573 /* See target.h.  */
574 
575 int
576 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
577 {
578   target_ops *target = current_inferior ()->top_target ();
579 
580   return target->remove_hw_breakpoint (gdbarch, bp_tgt);
581 }
582 
583 /* See target.h.  */
584 
585 bool
586 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
587 				       expression *cond)
588 {
589   target_ops *target = current_inferior ()->top_target ();
590 
591   return target->can_accel_watchpoint_condition (addr, len, type, cond);
592 }
593 
594 /* See target.h.  */
595 
596 bool
597 target_can_execute_reverse ()
598 {
599   return current_inferior ()->top_target ()->can_execute_reverse ();
600 }
601 
602 ptid_t
603 target_get_ada_task_ptid (long lwp, ULONGEST tid)
604 {
605   return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
606 }
607 
608 bool
609 target_filesystem_is_local ()
610 {
611   return current_inferior ()->top_target ()->filesystem_is_local ();
612 }
613 
614 void
615 target_trace_init ()
616 {
617   return current_inferior ()->top_target ()->trace_init ();
618 }
619 
620 void
621 target_download_tracepoint (bp_location *location)
622 {
623   return current_inferior ()->top_target ()->download_tracepoint (location);
624 }
625 
626 bool
627 target_can_download_tracepoint ()
628 {
629   return current_inferior ()->top_target ()->can_download_tracepoint ();
630 }
631 
632 void
633 target_download_trace_state_variable (const trace_state_variable &tsv)
634 {
635   target_ops *target = current_inferior ()->top_target ();
636 
637   return target->download_trace_state_variable (tsv);
638 }
639 
640 void
641 target_enable_tracepoint (bp_location *loc)
642 {
643   return current_inferior ()->top_target ()->enable_tracepoint (loc);
644 }
645 
646 void
647 target_disable_tracepoint (bp_location *loc)
648 {
649   return current_inferior ()->top_target ()->disable_tracepoint (loc);
650 }
651 
652 void
653 target_trace_start ()
654 {
655   return current_inferior ()->top_target ()->trace_start ();
656 }
657 
658 void
659 target_trace_set_readonly_regions ()
660 {
661   return current_inferior ()->top_target ()->trace_set_readonly_regions ();
662 }
663 
664 int
665 target_get_trace_status (trace_status *ts)
666 {
667   return current_inferior ()->top_target ()->get_trace_status (ts);
668 }
669 
670 void
671 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
672 {
673   return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
674 }
675 
676 void
677 target_trace_stop ()
678 {
679   return current_inferior ()->top_target ()->trace_stop ();
680 }
681 
682 int
683 target_trace_find (trace_find_type type, int num,
684 		   CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
685 {
686   target_ops *target = current_inferior ()->top_target ();
687 
688   return target->trace_find (type, num, addr1, addr2, tpp);
689 }
690 
691 bool
692 target_get_trace_state_variable_value (int tsv, LONGEST *val)
693 {
694   target_ops *target = current_inferior ()->top_target ();
695 
696   return target->get_trace_state_variable_value (tsv, val);
697 }
698 
699 int
700 target_save_trace_data (const char *filename)
701 {
702   return current_inferior ()->top_target ()->save_trace_data (filename);
703 }
704 
705 int
706 target_upload_tracepoints (uploaded_tp **utpp)
707 {
708   return current_inferior ()->top_target ()->upload_tracepoints (utpp);
709 }
710 
711 int
712 target_upload_trace_state_variables (uploaded_tsv **utsvp)
713 {
714   target_ops *target = current_inferior ()->top_target ();
715 
716   return target->upload_trace_state_variables (utsvp);
717 }
718 
719 LONGEST
720 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
721 {
722   target_ops *target = current_inferior ()->top_target ();
723 
724   return target->get_raw_trace_data (buf, offset, len);
725 }
726 
727 int
728 target_get_min_fast_tracepoint_insn_len ()
729 {
730   target_ops *target = current_inferior ()->top_target ();
731 
732   return target->get_min_fast_tracepoint_insn_len ();
733 }
734 
735 void
736 target_set_disconnected_tracing (int val)
737 {
738   return current_inferior ()->top_target ()->set_disconnected_tracing (val);
739 }
740 
741 void
742 target_set_circular_trace_buffer (int val)
743 {
744   return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
745 }
746 
747 void
748 target_set_trace_buffer_size (LONGEST val)
749 {
750   return current_inferior ()->top_target ()->set_trace_buffer_size (val);
751 }
752 
753 bool
754 target_set_trace_notes (const char *user, const char *notes,
755 			const char *stopnotes)
756 {
757   target_ops *target = current_inferior ()->top_target ();
758 
759   return target->set_trace_notes (user, notes, stopnotes);
760 }
761 
762 bool
763 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
764 {
765   return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
766 }
767 
768 void
769 target_set_permissions ()
770 {
771   return current_inferior ()->top_target ()->set_permissions ();
772 }
773 
774 bool
775 target_static_tracepoint_marker_at (CORE_ADDR addr,
776 				    static_tracepoint_marker *marker)
777 {
778   target_ops *target = current_inferior ()->top_target ();
779 
780   return target->static_tracepoint_marker_at (addr, marker);
781 }
782 
783 std::vector<static_tracepoint_marker>
784 target_static_tracepoint_markers_by_strid (const char *marker_id)
785 {
786   target_ops *target = current_inferior ()->top_target ();
787 
788   return target->static_tracepoint_markers_by_strid (marker_id);
789 }
790 
791 traceframe_info_up
792 target_traceframe_info ()
793 {
794   return current_inferior ()->top_target ()->traceframe_info ();
795 }
796 
797 bool
798 target_use_agent (bool use)
799 {
800   return current_inferior ()->top_target ()->use_agent (use);
801 }
802 
803 bool
804 target_can_use_agent ()
805 {
806   return current_inferior ()->top_target ()->can_use_agent ();
807 }
808 
809 bool
810 target_augmented_libraries_svr4_read ()
811 {
812   return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
813 }
814 
815 bool
816 target_supports_memory_tagging ()
817 {
818   return current_inferior ()->top_target ()->supports_memory_tagging ();
819 }
820 
821 bool
822 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
823 		      int type)
824 {
825   return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
826 }
827 
828 bool
829 target_store_memtags (CORE_ADDR address, size_t len,
830 		      const gdb::byte_vector &tags, int type)
831 {
832   return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
833 }
834 
835 void
836 target_log_command (const char *p)
837 {
838   return current_inferior ()->top_target ()->log_command (p);
839 }
840 
841 /* This is used to implement the various target commands.  */
842 
843 static void
844 open_target (const char *args, int from_tty, struct cmd_list_element *command)
845 {
846   auto *ti = static_cast<target_info *> (command->context ());
847   target_open_ftype *func = target_factories[ti];
848 
849   if (targetdebug)
850     gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
851 		ti->shortname);
852 
853   func (args, from_tty);
854 
855   if (targetdebug)
856     gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
857 		ti->shortname, args, from_tty);
858 }
859 
860 /* See target.h.  */
861 
862 void
863 add_target (const target_info &t, target_open_ftype *func,
864 	    completer_ftype *completer)
865 {
866   struct cmd_list_element *c;
867 
868   auto &func_slot = target_factories[&t];
869   if (func_slot != nullptr)
870     internal_error (_("target already added (\"%s\")."), t.shortname);
871   func_slot = func;
872 
873   if (targetlist == NULL)
874     add_basic_prefix_cmd ("target", class_run, _("\
875 Connect to a target machine or process.\n\
876 The first argument is the type or protocol of the target machine.\n\
877 Remaining arguments are interpreted by the target protocol.  For more\n\
878 information on the arguments for a particular protocol, type\n\
879 `help target ' followed by the protocol name."),
880 			  &targetlist, 0, &cmdlist);
881   c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
882   c->set_context ((void *) &t);
883   c->func = open_target;
884   if (completer != NULL)
885     set_cmd_completer (c, completer);
886 }
887 
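/* Editor's note: a hedged illustration, not part of GDB's sources.
   It sketches how a target backend typically registers itself with
   add_target from its _initialize_* routine; "sketch_target_info",
   "sketch_target_open" and "_initialize_sketch_target" are
   hypothetical names.  Wrapped in "#if 0" so it is never compiled.  */

#if 0

static const target_info sketch_target_info =
{
  "sketch",				/* shortname: creates "target sketch" */
  "Sketch debugging target",		/* longname */
  "Connect to a sketch target.\n\
Usage: target sketch ARGS"		/* doc */
};

/* Matches target_open_ftype; open_target above dispatches the
   "target sketch" command here.  */

static void
sketch_target_open (const char *args, int from_tty)
{
  /* Parse ARGS, then create a target_ops instance and push it, e.g.
     with current_inferior ()->push_target (...).  */
}

void _initialize_sketch_target ();
void
_initialize_sketch_target ()
{
  /* Registers the "target sketch" command; a completer_ftype can be
     supplied as the third argument instead of nullptr.  */
  add_target (sketch_target_info, sketch_target_open, nullptr);
}

#endif
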
888 /* See target.h.  */
889 
890 void
891 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
892 {
893   struct cmd_list_element *c;
894 
895   /* If we use add_alias_cmd, here, we do not get the deprecated warning,
896      see PR cli/15104.  */
897   c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
898   c->func = open_target;
899   c->set_context ((void *) &tinfo);
900   gdb::unique_xmalloc_ptr<char> alt
901     = xstrprintf ("target %s", tinfo.shortname);
902   deprecate_cmd (c, alt.release ());
903 }
904 
905 /* Stub functions */
906 
907 void
908 target_kill (void)
909 {
910 
911   /* If the commit_resume_state of the to-be-killed-inferior's process stratum
912      is true, and this inferior is the last live inferior with resumed threads
913      of that target, then we want to leave commit_resume_state to false, as the
914      target won't have any resumed threads anymore.  We achieve this with
915      this scoped_disable_commit_resumed.  On construction, it will set the flag
916      to false.  On destruction, it will only set it to true if there are resumed
917      threads left.  */
918   scoped_disable_commit_resumed disable ("killing");
919   current_inferior ()->top_target ()->kill ();
920 }
921 
922 void
923 target_load (const char *arg, int from_tty)
924 {
925   target_dcache_invalidate ();
926   current_inferior ()->top_target ()->load (arg, from_tty);
927 }
928 
929 /* Define it.  */
930 /* Define target_terminal::m_terminal_state.  */
931 target_terminal_state target_terminal::m_terminal_state
932   = target_terminal_state::is_ours;
933 
934 /* See target/target.h.  */
935 
936 void
937 target_terminal::init (void)
938 {
939   current_inferior ()->top_target ()->terminal_init ();
940 
941   m_terminal_state = target_terminal_state::is_ours;
942 }
943 
944 /* See target/target.h.  */
945 
946 void
947 target_terminal::inferior (void)
948 {
949   struct ui *ui = current_ui;
950 
951   /* A background resume (``run&'') should leave GDB in control of the
952      terminal.  */
953   if (ui->prompt_state != PROMPT_BLOCKED)
954     return;
955 
956   /* Since we always run the inferior in the main console (unless "set
957      inferior-tty" is in effect), when some UI other than the main one
958      calls target_terminal::inferior, then we leave the main UI's
959      terminal settings as is.  */
960   if (ui != main_ui)
961     return;
962 
963   /* If GDB is resuming the inferior in the foreground, install
964      inferior's terminal modes.  */
965 
966   struct inferior *inf = current_inferior ();
967 
968   if (inf->terminal_state != target_terminal_state::is_inferior)
969     {
970       current_inferior ()->top_target ()->terminal_inferior ();
971       inf->terminal_state = target_terminal_state::is_inferior;
972     }
973 
974   m_terminal_state = target_terminal_state::is_inferior;
975 
976   /* If the user hit C-c before, pretend that it was hit right
977      here.  */
978   if (check_quit_flag ())
979     target_pass_ctrlc ();
980 }
981 
982 /* See target/target.h.  */
983 
984 void
985 target_terminal::restore_inferior (void)
986 {
987   struct ui *ui = current_ui;
988 
989   /* See target_terminal::inferior().  */
990   if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
991     return;
992 
993   /* Restore the terminal settings of inferiors that were in the
994      foreground but are now ours_for_output due to a temporary
995      target_terminal::ours_for_output() call.  */
996 
997   {
998     scoped_restore_current_inferior restore_inferior;
999 
1000     for (::inferior *inf : all_inferiors ())
1001       {
1002 	if (inf->terminal_state == target_terminal_state::is_ours_for_output)
1003 	  {
1004 	    set_current_inferior (inf);
1005 	    current_inferior ()->top_target ()->terminal_inferior ();
1006 	    inf->terminal_state = target_terminal_state::is_inferior;
1007 	  }
1008       }
1009   }
1010 
1011   m_terminal_state = target_terminal_state::is_inferior;
1012 
1013   /* If the user hit C-c before, pretend that it was hit right
1014      here.  */
1015   if (check_quit_flag ())
1016     target_pass_ctrlc ();
1017 }
1018 
1019 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1020    is_ours_for_output.  */
1021 
1022 static void
1023 target_terminal_is_ours_kind (target_terminal_state desired_state)
1024 {
1025   scoped_restore_current_inferior restore_inferior;
1026 
1027   /* Must do this in two passes.  First, have all inferiors save the
1028      current terminal settings.  Then, after all inferiors have had a
1029      chance to safely save the terminal settings, restore GDB's
1030      terminal settings.  */
1031 
1032   for (inferior *inf : all_inferiors ())
1033     {
1034       if (inf->terminal_state == target_terminal_state::is_inferior)
1035 	{
1036 	  set_current_inferior (inf);
1037 	  current_inferior ()->top_target ()->terminal_save_inferior ();
1038 	}
1039     }
1040 
1041   for (inferior *inf : all_inferiors ())
1042     {
1043       /* Note we don't check is_inferior here like above because we
1044 	 need to handle 'is_ours_for_output -> is_ours' too.  Careful
1045 	 to never transition from 'is_ours' to 'is_ours_for_output',
1046 	 though.  */
1047       if (inf->terminal_state != target_terminal_state::is_ours
1048 	  && inf->terminal_state != desired_state)
1049 	{
1050 	  set_current_inferior (inf);
1051 	  if (desired_state == target_terminal_state::is_ours)
1052 	    current_inferior ()->top_target ()->terminal_ours ();
1053 	  else if (desired_state == target_terminal_state::is_ours_for_output)
1054 	    current_inferior ()->top_target ()->terminal_ours_for_output ();
1055 	  else
1056 	    gdb_assert_not_reached ("unhandled desired state");
1057 	  inf->terminal_state = desired_state;
1058 	}
1059     }
1060 }
1061 
1062 /* See target/target.h.  */
1063 
1064 void
1065 target_terminal::ours ()
1066 {
1067   struct ui *ui = current_ui;
1068 
1069   /* See target_terminal::inferior.  */
1070   if (ui != main_ui)
1071     return;
1072 
1073   if (m_terminal_state == target_terminal_state::is_ours)
1074     return;
1075 
1076   target_terminal_is_ours_kind (target_terminal_state::is_ours);
1077   m_terminal_state = target_terminal_state::is_ours;
1078 }
1079 
1080 /* See target/target.h.  */
1081 
1082 void
1083 target_terminal::ours_for_output ()
1084 {
1085   struct ui *ui = current_ui;
1086 
1087   /* See target_terminal::inferior.  */
1088   if (ui != main_ui)
1089     return;
1090 
1091   if (!target_terminal::is_inferior ())
1092     return;
1093 
1094   target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1095   target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1096 }
1097 
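/* Editor's note: a hedged sketch, not part of GDB, of how callers
   typically sequence the terminal-state transitions implemented
   above when resuming the inferior in the foreground, printing
   asynchronous output, and then handing control back to the user.
   Wrapped in "#if 0" so it is never compiled.  */

#if 0

static void
sketch_resume_and_report ()
{
  /* Give the terminal to the inferior before resuming it in the
     foreground.  */
  target_terminal::inferior ();

  /* ... resume, then wait for a stop event ... */

  /* Temporarily take the terminal for GDB's own output, e.g. to
     print the stop message.  */
  target_terminal::ours_for_output ();

  /* When GDB is about to read commands again, take the terminal
     back completely.  */
  target_terminal::ours ();
}

#endif
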
1098 /* See target/target.h.  */
1099 
1100 void
1101 target_terminal::info (const char *arg, int from_tty)
1102 {
1103   current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1104 }
1105 
1106 /* See target.h.  */
1107 
1108 bool
1109 target_supports_terminal_ours (void)
1110 {
1111   /* The current top target is the target at the top of the target
1112      stack of the current inferior.  While normally there's always an
1113      inferior, we must check for nullptr here because we can get here
1114      very early during startup, before the initial inferior is first
1115      created.  */
1116   inferior *inf = current_inferior ();
1117 
1118   if (inf == nullptr)
1119     return false;
1120   return inf->top_target ()->supports_terminal_ours ();
1121 }
1122 
1123 static void
1124 tcomplain (void)
1125 {
1126   error (_("You can't do that when your target is `%s'"),
1127 	 current_inferior ()->top_target ()->shortname ());
1128 }
1129 
1130 void
1131 noprocess (void)
1132 {
1133   error (_("You can't do that without a process to debug."));
1134 }
1135 
1136 static void
1137 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1138 {
1139   gdb_printf (_("No saved terminal information.\n"));
1140 }
1141 
1142 /* A default implementation for the to_get_ada_task_ptid target method.
1143 
1144    This function builds the PTID by using both LWP and TID as part of
1145    the PTID lwp and tid elements.  The pid used is the pid of the
1146    inferior_ptid.  */
1147 
1148 static ptid_t
1149 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1150 {
1151   return ptid_t (inferior_ptid.pid (), lwp, tid);
1152 }
1153 
1154 static enum exec_direction_kind
1155 default_execution_direction (struct target_ops *self)
1156 {
1157   if (!target_can_execute_reverse ())
1158     return EXEC_FORWARD;
1159   else if (!target_can_async_p ())
1160     return EXEC_FORWARD;
1161   else
1162     gdb_assert_not_reached ("\
1163 to_execution_direction must be implemented for reverse async");
1164 }
1165 
1166 /* See target.h.  */
1167 
1168 void
1169 target_ops_ref_policy::decref (target_ops *t)
1170 {
1171   t->decref ();
1172   if (t->refcount () == 0)
1173     {
1174       if (t->stratum () == process_stratum)
1175 	connection_list_remove (as_process_stratum_target (t));
1176       target_close (t);
1177     }
1178 }
1179 
1180 /* See target.h.  */
1181 
1182 void
1183 target_stack::push (target_ops *t)
1184 {
1185   /* We must create a new reference first.  It is possible that T is
1186      already pushed on this target stack, in which case we will first
1187      unpush it below, before re-pushing it.  If we don't increment the
1188      reference count now, then when we unpush it, we might end up deleting
1189      T, which is not good.  */
1190   auto ref = target_ops_ref::new_reference (t);
1191 
1192   strata stratum = t->stratum ();
1193 
1194   /* If there's already a target at this stratum, remove it.  */
1195 
1196   if (m_stack[stratum].get () != nullptr)
1197     unpush (m_stack[stratum].get ());
1198 
1199   /* Now add the new one.  */
1200   m_stack[stratum] = std::move (ref);
1201 
1202   if (m_top < stratum)
1203     m_top = stratum;
1204 
1205   if (stratum == process_stratum)
1206     connection_list_add (as_process_stratum_target (t));
1207 }
1208 
1209 /* See target.h.  */
1210 
1211 bool
1212 target_stack::unpush (target_ops *t)
1213 {
1214   gdb_assert (t != NULL);
1215 
1216   strata stratum = t->stratum ();
1217 
1218   if (stratum == dummy_stratum)
1219     internal_error (_("Attempt to unpush the dummy target"));
1220 
1221   /* Look for the specified target.  Note that a target can only occur
1222      once in the target stack.  */
1223 
1224   if (m_stack[stratum] != t)
1225     {
1226       /* If T wasn't pushed, quit.  Only open targets should be
1227 	 closed.  */
1228       return false;
1229     }
1230 
1231   if (m_top == stratum)
1232     m_top = this->find_beneath (t)->stratum ();
1233 
1234   /* Move the target reference off the target stack, this sets the pointer
1235      held in m_stack to nullptr, and places the reference in ref.  When
1236      ref goes out of scope its reference count will be decremented, which
1237      might cause the target to close.
1238 
1239      We have to do it this way, and not just set the value in m_stack to
1240      nullptr directly, because doing so would decrement the reference
1241      count first, which might close the target, and closing the target
1242      does a check that the target is not on any inferiors target_stack.  */
1243   auto ref = std::move (m_stack[stratum]);
1244 
1245   return true;
1246 }
1247 
1248 void
1249 target_unpusher::operator() (struct target_ops *ops) const
1250 {
1251   current_inferior ()->unpush_target (ops);
1252 }
1253 
1254 /* Default implementation of to_get_thread_local_address.  */
1255 
1256 static void
1257 generic_tls_error (void)
1258 {
1259   throw_error (TLS_GENERIC_ERROR,
1260 	       _("Cannot find thread-local variables on this target"));
1261 }
1262 
1263 /* Using the objfile specified in OBJFILE, find the address for the
1264    current thread's thread-local storage with offset OFFSET.  */
1265 CORE_ADDR
1266 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1267 {
1268   volatile CORE_ADDR addr = 0;
1269   struct target_ops *target = current_inferior ()->top_target ();
1270   struct gdbarch *gdbarch = target_gdbarch ();
1271 
1272   /* If OBJFILE is a separate debug object file, look for the
1273      original object file.  */
1274   if (objfile->separate_debug_objfile_backlink != NULL)
1275     objfile = objfile->separate_debug_objfile_backlink;
1276 
1277   if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1278     {
1279       ptid_t ptid = inferior_ptid;
1280 
1281       try
1282 	{
1283 	  CORE_ADDR lm_addr;
1284 
1285 	  /* Fetch the load module address for this objfile.  */
1286 	  lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1287 							   objfile);
1288 
1289 	  if (gdbarch_get_thread_local_address_p (gdbarch))
1290 	    addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1291 						     offset);
1292 	  else
1293 	    addr = target->get_thread_local_address (ptid, lm_addr, offset);
1294 	}
1295       /* If an error occurred, print TLS-related messages here.  Otherwise,
1296 	 throw the error to some higher catcher.  */
1297       catch (const gdb_exception &ex)
1298 	{
1299 	  int objfile_is_library = (objfile->flags & OBJF_SHARED);
1300 
1301 	  switch (ex.error)
1302 	    {
1303 	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
1304 	      error (_("Cannot find thread-local variables "
1305 		       "in this thread library."));
1306 	      break;
1307 	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1308 	      if (objfile_is_library)
1309 		error (_("Cannot find shared library `%s' in dynamic"
1310 			 " linker's load module list"), objfile_name (objfile));
1311 	      else
1312 		error (_("Cannot find executable file `%s' in dynamic"
1313 			 " linker's load module list"), objfile_name (objfile));
1314 	      break;
1315 	    case TLS_NOT_ALLOCATED_YET_ERROR:
1316 	      if (objfile_is_library)
1317 		error (_("The inferior has not yet allocated storage for"
1318 			 " thread-local variables in\n"
1319 			 "the shared library `%s'\n"
1320 			 "for %s"),
1321 		       objfile_name (objfile),
1322 		       target_pid_to_str (ptid).c_str ());
1323 	      else
1324 		error (_("The inferior has not yet allocated storage for"
1325 			 " thread-local variables in\n"
1326 			 "the executable `%s'\n"
1327 			 "for %s"),
1328 		       objfile_name (objfile),
1329 		       target_pid_to_str (ptid).c_str ());
1330 	      break;
1331 	    case TLS_GENERIC_ERROR:
1332 	      if (objfile_is_library)
1333 		error (_("Cannot find thread-local storage for %s, "
1334 			 "shared library %s:\n%s"),
1335 		       target_pid_to_str (ptid).c_str (),
1336 		       objfile_name (objfile), ex.what ());
1337 	      else
1338 		error (_("Cannot find thread-local storage for %s, "
1339 			 "executable file %s:\n%s"),
1340 		       target_pid_to_str (ptid).c_str (),
1341 		       objfile_name (objfile), ex.what ());
1342 	      break;
1343 	    default:
1344 	      throw;
1345 	      break;
1346 	    }
1347 	}
1348     }
1349   else
1350     error (_("Cannot find thread-local variables on this target"));
1351 
1352   return addr;
1353 }
1354 
1355 const char *
1356 target_xfer_status_to_string (enum target_xfer_status status)
1357 {
1358 #define CASE(X) case X: return #X
1359   switch (status)
1360     {
1361       CASE(TARGET_XFER_E_IO);
1362       CASE(TARGET_XFER_UNAVAILABLE);
1363     default:
1364       return "<unknown>";
1365     }
1366 #undef CASE
1367 };
1368 
1369 
1370 const target_section_table *
1371 target_get_section_table (struct target_ops *target)
1372 {
1373   return target->get_section_table ();
1374 }
1375 
1376 /* Find a section containing ADDR.  */
1377 
1378 const struct target_section *
1379 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1380 {
1381   const target_section_table *table = target_get_section_table (target);
1382 
1383   if (table == NULL)
1384     return NULL;
1385 
1386   for (const target_section &secp : *table)
1387     {
1388       if (addr >= secp.addr && addr < secp.endaddr)
1389 	return &secp;
1390     }
1391   return NULL;
1392 }
1393 
1394 /* See target.h.  */
1395 
1396 const target_section_table *
1397 default_get_section_table ()
1398 {
1399   return &current_program_space->target_sections ();
1400 }
1401 
1402 /* Helper for the memory xfer routines.  Checks the attributes of the
1403    memory region of MEMADDR against the read or write being attempted.
1404    If the access is permitted returns true, otherwise returns false.
1405    REGION_P is an optional output parameter.  If not-NULL, it is
1406    filled with a pointer to the memory region of MEMADDR.  REG_LEN
1407    is set to LEN trimmed to the end of the region.  This is how much the
1408    caller can continue requesting, if the access is permitted.  A
1409    single xfer request must not straddle memory region boundaries.  */
1410 
1411 static int
1412 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1413 			  ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1414 			  struct mem_region **region_p)
1415 {
1416   struct mem_region *region;
1417 
1418   region = lookup_mem_region (memaddr);
1419 
1420   if (region_p != NULL)
1421     *region_p = region;
1422 
1423   switch (region->attrib.mode)
1424     {
1425     case MEM_RO:
1426       if (writebuf != NULL)
1427 	return 0;
1428       break;
1429 
1430     case MEM_WO:
1431       if (readbuf != NULL)
1432 	return 0;
1433       break;
1434 
1435     case MEM_FLASH:
1436       /* We only support writing to flash during "load" for now.  */
1437       if (writebuf != NULL)
1438 	error (_("Writing to flash memory forbidden in this context"));
1439       break;
1440 
1441     case MEM_NONE:
1442       return 0;
1443     }
1444 
1445   /* region->hi == 0 means there's no upper bound.  */
1446   if (memaddr + len < region->hi || region->hi == 0)
1447     *reg_len = len;
1448   else
1449     *reg_len = region->hi - memaddr;
1450 
1451   return 1;
1452 }
1453 
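/* Editor's note (illustration, not from GDB): with a readable region
   spanning [0x1000, 0x2000) and a 0x40-byte read starting at 0x1ff0,
   memory_xfer_check_region above permits the access but sets *REG_LEN
   to 0x10 (0x2000 - 0x1ff0), so the transfer stops at the region
   boundary and the caller must re-request the remainder against the
   following region.  */
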
1454 /* Read memory from more than one valid target.  A core file, for
1455    instance, could have some of the memory but delegate other bits to
1456    the target below it.  So, we must manually try all targets.  */
1457 
1458 enum target_xfer_status
1459 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1460 			 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1461 			 ULONGEST *xfered_len)
1462 {
1463   enum target_xfer_status res;
1464 
1465   do
1466     {
1467       res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1468 			       readbuf, writebuf, memaddr, len,
1469 			       xfered_len);
1470       if (res == TARGET_XFER_OK)
1471 	break;
1472 
1473       /* Stop if the target reports that the memory is not available.  */
1474       if (res == TARGET_XFER_UNAVAILABLE)
1475 	break;
1476 
1477       /* Don't continue past targets which have all the memory.
1478 	 At one time, this code was necessary to read data from
1479 	 executables / shared libraries when data for the requested
1480 	 addresses weren't available in the core file.  But now the
1481 	 core target handles this case itself.  */
1482       if (ops->has_all_memory ())
1483 	break;
1484 
1485       ops = ops->beneath ();
1486     }
1487   while (ops != NULL);
1488 
1489   /* The cache works at the raw memory level.  Make sure the cache
1490      gets updated with raw contents no matter what kind of memory
1491      object was originally being written.  Note we do write-through
1492      first, so that if it fails, we don't write to the cache contents
1493      that never made it to the target.  */
1494   if (writebuf != NULL
1495       && inferior_ptid != null_ptid
1496       && target_dcache_init_p ()
1497       && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1498     {
1499       DCACHE *dcache = target_dcache_get ();
1500 
1501       /* Note that writing to an area of memory which wasn't present
1502 	 in the cache doesn't cause it to be loaded in.  */
1503       dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1504     }
1505 
1506   return res;
1507 }
1508 
1509 /* Perform a partial memory transfer.
1510    For docs see target.h, to_xfer_partial.  */
1511 
1512 static enum target_xfer_status
1513 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1514 		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1515 		       ULONGEST len, ULONGEST *xfered_len)
1516 {
1517   enum target_xfer_status res;
1518   ULONGEST reg_len;
1519   struct mem_region *region;
1520   struct inferior *inf;
1521 
1522   /* For accesses to unmapped overlay sections, read directly from
1523      files.  Must do this first, as MEMADDR may need adjustment.  */
1524   if (readbuf != NULL && overlay_debugging)
1525     {
1526       struct obj_section *section = find_pc_overlay (memaddr);
1527 
1528       if (pc_in_unmapped_range (memaddr, section))
1529 	{
1530 	  const target_section_table *table = target_get_section_table (ops);
1531 	  const char *section_name = section->the_bfd_section->name;
1532 
1533 	  memaddr = overlay_mapped_address (memaddr, section);
1534 
1535 	  auto match_cb = [=] (const struct target_section *s)
1536 	    {
1537 	      return (strcmp (section_name, s->the_bfd_section->name) == 0);
1538 	    };
1539 
1540 	  return section_table_xfer_memory_partial (readbuf, writebuf,
1541 						    memaddr, len, xfered_len,
1542 						    *table, match_cb);
1543 	}
1544     }
1545 
1546   /* Try the executable files, if "trust-readonly-sections" is set.  */
1547   if (readbuf != NULL && trust_readonly)
1548     {
1549       const struct target_section *secp
1550 	= target_section_by_addr (ops, memaddr);
1551       if (secp != NULL
1552 	  && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1553 	{
1554 	  const target_section_table *table = target_get_section_table (ops);
1555 	  return section_table_xfer_memory_partial (readbuf, writebuf,
1556 						    memaddr, len, xfered_len,
1557 						    *table);
1558 	}
1559     }
1560 
1561   /* Try GDB's internal data cache.  */
1562 
1563   if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1564 				 &region))
1565     return TARGET_XFER_E_IO;
1566 
1567   if (inferior_ptid != null_ptid)
1568     inf = current_inferior ();
1569   else
1570     inf = NULL;
1571 
1572   if (inf != NULL
1573       && readbuf != NULL
1574       /* The dcache reads whole cache lines; that doesn't play well
1575 	 with reading from a trace buffer, because reading outside of
1576 	 the collected memory range fails.  */
1577       && get_traceframe_number () == -1
1578       && (region->attrib.cache
1579 	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1580 	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1581     {
1582       DCACHE *dcache = target_dcache_get_or_init ();
1583 
1584       return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1585 					 reg_len, xfered_len);
1586     }
1587 
1588   /* If none of those methods found the memory we wanted, fall back
1589      to a target partial transfer.  Normally a single call to
1590      to_xfer_partial is enough; if it doesn't recognize an object
1591      it will call the to_xfer_partial of the next target down.
1592      But for memory this won't do.  Memory is the only target
1593      object which can be read from more than one valid target.
1594      A core file, for instance, could have some of the memory but
1595      delegate other bits to the target below it.  So, we must
1596      manually try all targets.  */
1597 
1598   res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1599 				 xfered_len);
1600 
1601   /* If we still haven't got anything, return the last error.  We
1602      give up.  */
1603   return res;
1604 }
1605 
1606 /* Perform a partial memory transfer.  For docs see target.h,
1607    to_xfer_partial.  */
1608 
1609 static enum target_xfer_status
1610 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1611 		     gdb_byte *readbuf, const gdb_byte *writebuf,
1612 		     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1613 {
1614   enum target_xfer_status res;
1615 
1616   /* Zero length requests are ok and require no work.  */
1617   if (len == 0)
1618     return TARGET_XFER_EOF;
1619 
1620   memaddr = gdbarch_remove_non_address_bits (target_gdbarch (), memaddr);
1621 
1622   /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1623      breakpoint insns, thus hiding out from higher layers whether
1624      there are software breakpoints inserted in the code stream.  */
1625   if (readbuf != NULL)
1626     {
1627       res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1628 				   xfered_len);
1629 
1630       if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1631 	breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1632     }
1633   else
1634     {
1635       /* A large write request is likely to be partially satisfied
1636 	 by memory_xfer_partial_1.  We will continually malloc
1637 	 and free a copy of the entire write request for breakpoint
1638 	 shadow handling even though we only end up writing a small
1639 	 subset of it.  Cap writes to a limit specified by the target
1640 	 to mitigate this.  */
1641       len = std::min (ops->get_memory_xfer_limit (), len);
1642 
1643       gdb::byte_vector buf (writebuf, writebuf + len);
1644       breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1645       res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1646 				   xfered_len);
1647     }
1648 
1649   return res;
1650 }
1651 
1652 scoped_restore_tmpl<int>
1653 make_scoped_restore_show_memory_breakpoints (int show)
1654 {
1655   return make_scoped_restore (&show_memory_breakpoints, show);
1656 }
1657 
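/* Editor's note: a hedged usage sketch, not taken from this file.
   The returned scoped_restore flips show_memory_breakpoints for the
   enclosing scope only, so reads return the breakpoint instructions
   GDB inserted instead of the saved shadow contents that
   memory_xfer_partial normally substitutes.  Wrapped in "#if 0" so it
   is never compiled.  */

#if 0

static void
sketch_read_raw_bytes (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  /* show_memory_breakpoints is restored automatically when RESTORE
     goes out of scope.  */
  scoped_restore restore
    = make_scoped_restore_show_memory_breakpoints (1);

  target_read_memory (addr, buf, len);
}

#endif
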
1658 /* For docs see target.h, to_xfer_partial.  */
1659 
1660 enum target_xfer_status
1661 target_xfer_partial (struct target_ops *ops,
1662 		     enum target_object object, const char *annex,
1663 		     gdb_byte *readbuf, const gdb_byte *writebuf,
1664 		     ULONGEST offset, ULONGEST len,
1665 		     ULONGEST *xfered_len)
1666 {
1667   enum target_xfer_status retval;
1668 
1669   /* Transfer is done when LEN is zero.  */
1670   if (len == 0)
1671     return TARGET_XFER_EOF;
1672 
1673   if (writebuf && !may_write_memory)
1674     error (_("Writing to memory is not allowed (addr %s, len %s)"),
1675 	   core_addr_to_string_nz (offset), plongest (len));
1676 
1677   *xfered_len = 0;
1678 
1679   /* If this is a memory transfer, let the memory-specific code
1680      have a look at it instead.  Memory transfers are more
1681      complicated.  */
1682   if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1683       || object == TARGET_OBJECT_CODE_MEMORY)
1684     retval = memory_xfer_partial (ops, object, readbuf,
1685 				  writebuf, offset, len, xfered_len);
1686   else if (object == TARGET_OBJECT_RAW_MEMORY)
1687     {
1688       /* Skip/avoid accessing the target if the memory region
1689 	 attributes block the access.  Check this here instead of in
1690 	 raw_memory_xfer_partial as otherwise we'd end up checking
1691 	 this twice in the case of the memory_xfer_partial path is
1692 	 this twice in the case where the memory_xfer_partial path is
1693 	 taken; once before checking the dcache, and again in the
1694       if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1695 				     NULL))
1696 	return TARGET_XFER_E_IO;
1697 
1698       /* Request the normal memory object from other layers.  */
1699       retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1700 					xfered_len);
1701     }
1702   else
1703     retval = ops->xfer_partial (object, annex, readbuf,
1704 				writebuf, offset, len, xfered_len);
1705 
1706   if (targetdebug)
1707     {
1708       const unsigned char *myaddr = NULL;
1709 
1710       gdb_printf (gdb_stdlog,
1711 		  "%s:target_xfer_partial "
1712 		  "(%d, %s, %s, %s, %s, %s) = %d, %s",
1713 		  ops->shortname (),
1714 		  (int) object,
1715 		  (annex ? annex : "(null)"),
1716 		  host_address_to_string (readbuf),
1717 		  host_address_to_string (writebuf),
1718 		  core_addr_to_string_nz (offset),
1719 		  pulongest (len), retval,
1720 		  pulongest (*xfered_len));
1721 
1722       if (readbuf)
1723 	myaddr = readbuf;
1724       if (writebuf)
1725 	myaddr = writebuf;
1726       if (retval == TARGET_XFER_OK && myaddr != NULL)
1727 	{
1728 	  int i;
1729 
1730 	  gdb_puts (", bytes =", gdb_stdlog);
1731 	  for (i = 0; i < *xfered_len; i++)
1732 	    {
1733 	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1734 		{
1735 		  if (targetdebug < 2 && i > 0)
1736 		    {
1737 		      gdb_printf (gdb_stdlog, " ...");
1738 		      break;
1739 		    }
1740 		  gdb_printf (gdb_stdlog, "\n");
1741 		}
1742 
1743 	      gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1744 	    }
1745 	}
1746 
1747       gdb_putc ('\n', gdb_stdlog);
1748     }
1749 
1750   /* Check implementations of to_xfer_partial update *XFERED_LEN
1751      properly.  Do assertion after printing debug messages, so that we
1752      can find more clues on assertion failure from debugging messages.  */
1753   if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1754     gdb_assert (*xfered_len > 0);
1755 
1756   return retval;
1757 }
1758 
1759 /* Read LEN bytes of target memory at address MEMADDR, placing the
1760    results in GDB's memory at MYADDR.  Returns either 0 for success or
1761    -1 if any error occurs.
1762 
1763    If an error occurs, no guarantee is made about the contents of the data at
1764    MYADDR.  In particular, the caller should not depend upon partial reads
1765    filling the buffer with good data.  There is no way for the caller to know
1766    how much good data might have been transfered anyway.  Callers that can
1767    how much good data might have been transferred anyway.  Callers that can
1768    it makes no progress, and then return how much was transferred).  */
1769 
1770 int
1771 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1772 {
1773   if (target_read (current_inferior ()->top_target (),
1774 		   TARGET_OBJECT_MEMORY, NULL,
1775 		   myaddr, memaddr, len) == len)
1776     return 0;
1777   else
1778     return -1;
1779 }
1780 
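/* Editor's note: a hedged usage sketch, not from this file, of the
   all-or-nothing contract documented above: a caller either gets the
   whole buffer or treats the read as failed, instead of relying on
   partially filled contents.  Wrapped in "#if 0" so it is never
   compiled.  */

#if 0

static ULONGEST
sketch_read_u64_or_error (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  gdb_byte buf[8];

  if (target_read_memory (addr, buf, sizeof buf) != 0)
    error (_("Cannot read memory at %s"), paddress (gdbarch, addr));

  return extract_unsigned_integer (buf, sizeof buf,
				   gdbarch_byte_order (gdbarch));
}

#endif
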
1781 /* See target/target.h.  */
1782 
1783 int
1784 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1785 {
1786   gdb_byte buf[4];
1787   int r;
1788 
1789   r = target_read_memory (memaddr, buf, sizeof buf);
1790   if (r != 0)
1791     return r;
1792   *result = extract_unsigned_integer (buf, sizeof buf,
1793 				      gdbarch_byte_order (target_gdbarch ()));
1794   return 0;
1795 }
1796 
1797 /* Like target_read_memory, but specify explicitly that this is a read
1798    from the target's raw memory.  That is, this read bypasses the
1799    dcache, breakpoint shadowing, etc.  */
1800 
1801 int
1802 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1803 {
1804   if (target_read (current_inferior ()->top_target (),
1805 		   TARGET_OBJECT_RAW_MEMORY, NULL,
1806 		   myaddr, memaddr, len) == len)
1807     return 0;
1808   else
1809     return -1;
1810 }
1811 
1812 /* Like target_read_memory, but specify explicitly that this is a read from
1813    the target's stack.  This may trigger different cache behavior.  */
1814 
1815 int
1816 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1817 {
1818   if (target_read (current_inferior ()->top_target (),
1819 		   TARGET_OBJECT_STACK_MEMORY, NULL,
1820 		   myaddr, memaddr, len) == len)
1821     return 0;
1822   else
1823     return -1;
1824 }
1825 
1826 /* Like target_read_memory, but specify explicitly that this is a read from
1827    the target's code.  This may trigger different cache behavior.  */
1828 
1829 int
1830 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1831 {
1832   if (target_read (current_inferior ()->top_target (),
1833 		   TARGET_OBJECT_CODE_MEMORY, NULL,
1834 		   myaddr, memaddr, len) == len)
1835     return 0;
1836   else
1837     return -1;
1838 }
1839 
1840 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1841    Returns either 0 for success or -1 if any error occurs.  If an
1842    error occurs, no guarantee is made about how much data got written.
1843    Callers that can deal with partial writes should call
1844    target_write.  */
1845 
1846 int
1847 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1848 {
1849   if (target_write (current_inferior ()->top_target (),
1850 		    TARGET_OBJECT_MEMORY, NULL,
1851 		    myaddr, memaddr, len) == len)
1852     return 0;
1853   else
1854     return -1;
1855 }
1856 
1857 /* Write LEN bytes from MYADDR to target raw memory at address
1858    MEMADDR.  Returns either 0 for success or -1 if any error occurs.
1859    If an error occurs, no guarantee is made about how much data got
1860    written.  Callers that can deal with partial writes should call
1861    target_write.  */
1862 
1863 int
1864 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1865 {
1866   if (target_write (current_inferior ()->top_target (),
1867 		    TARGET_OBJECT_RAW_MEMORY, NULL,
1868 		    myaddr, memaddr, len) == len)
1869     return 0;
1870   else
1871     return -1;
1872 }
1873 
1874 /* Fetch the target's memory map.  */
1875 
1876 std::vector<mem_region>
1877 target_memory_map (void)
1878 {
1879   target_ops *target = current_inferior ()->top_target ();
1880   std::vector<mem_region> result = target->memory_map ();
1881   if (result.empty ())
1882     return result;
1883 
1884   std::sort (result.begin (), result.end ());
1885 
1886   /* Check that regions do not overlap.  Simultaneously assign
1887      a numbering for the "mem" commands to use to refer to
1888      each region.  */
1889   mem_region *last_one = NULL;
1890   for (size_t ix = 0; ix < result.size (); ix++)
1891     {
1892       mem_region *this_one = &result[ix];
1893       this_one->number = ix;
1894 
1895       if (last_one != NULL && last_one->hi > this_one->lo)
1896 	{
1897 	  warning (_("Overlapping regions in memory map: ignoring"));
1898 	  return std::vector<mem_region> ();
1899 	}
1900 
1901       last_one = this_one;
1902     }
1903 
1904   return result;
1905 }
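
/* Illustrative sketch of consuming the result (not code from this
   file, just an example of the intended use): the regions come back
   sorted and numbered, so a caller can simply iterate them:

     for (const mem_region &r : target_memory_map ())
       gdb_printf ("region %d: %s..%s\n", r.number,
		   hex_string (r.lo), hex_string (r.hi));

   An empty vector means either that the target provides no memory map
   or that the map was discarded because its regions overlapped.  */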
1906 
1907 void
1908 target_flash_erase (ULONGEST address, LONGEST length)
1909 {
1910   current_inferior ()->top_target ()->flash_erase (address, length);
1911 }
1912 
1913 void
1914 target_flash_done (void)
1915 {
1916   current_inferior ()->top_target ()->flash_done ();
1917 }
1918 
1919 static void
1920 show_trust_readonly (struct ui_file *file, int from_tty,
1921 		     struct cmd_list_element *c, const char *value)
1922 {
1923   gdb_printf (file,
1924 	      _("Mode for reading from readonly sections is %s.\n"),
1925 	      value);
1926 }
1927 
1928 /* Target vector read/write partial wrapper functions.  */
1929 
1930 static enum target_xfer_status
1931 target_read_partial (struct target_ops *ops,
1932 		     enum target_object object,
1933 		     const char *annex, gdb_byte *buf,
1934 		     ULONGEST offset, ULONGEST len,
1935 		     ULONGEST *xfered_len)
1936 {
1937   return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1938 			      xfered_len);
1939 }
1940 
1941 static enum target_xfer_status
1942 target_write_partial (struct target_ops *ops,
1943 		      enum target_object object,
1944 		      const char *annex, const gdb_byte *buf,
1945 		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1946 {
1947   return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1948 			      xfered_len);
1949 }
1950 
1951 /* Wrappers to perform the full transfer.  */
1952 
1953 /* For docs on target_read see target.h.  */
1954 
1955 LONGEST
1956 target_read (struct target_ops *ops,
1957 	     enum target_object object,
1958 	     const char *annex, gdb_byte *buf,
1959 	     ULONGEST offset, LONGEST len)
1960 {
1961   LONGEST xfered_total = 0;
1962   int unit_size = 1;
1963 
1964   /* If we are reading from a memory object, find the length of an addressable
1965      unit for that architecture.  */
1966   if (object == TARGET_OBJECT_MEMORY
1967       || object == TARGET_OBJECT_STACK_MEMORY
1968       || object == TARGET_OBJECT_CODE_MEMORY
1969       || object == TARGET_OBJECT_RAW_MEMORY)
1970     unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1971 
1972   while (xfered_total < len)
1973     {
1974       ULONGEST xfered_partial;
1975       enum target_xfer_status status;
1976 
1977       status = target_read_partial (ops, object, annex,
1978 				    buf + xfered_total * unit_size,
1979 				    offset + xfered_total, len - xfered_total,
1980 				    &xfered_partial);
1981 
1982       /* Call an observer, notifying them of the xfer progress?  */
1983       if (status == TARGET_XFER_EOF)
1984 	return xfered_total;
1985       else if (status == TARGET_XFER_OK)
1986 	{
1987 	  xfered_total += xfered_partial;
1988 	  QUIT;
1989 	}
1990       else
1991 	return TARGET_XFER_E_IO;
1992 
1993     }
1994   return len;
1995 }
1996 
1997 /* Assuming that the entire [begin, end) range of memory cannot be
1998    read, try to read whatever subrange is possible to read.
1999 
2000    The function returns, in RESULT, either zero or one memory block.
2001    If there's a readable subrange at the beginning, it is completely
2002    read and returned.  Any further readable subrange will not be read.
2003    Otherwise, if there's a readable subrange at the end, it will be
2004    completely read and returned.  Any readable subranges before it
2005    (obviously, not starting at the beginning), will be ignored.  In
2006    (obviously, not starting at the beginning) are ignored.  In other
2007    cases -- either no readable subrange at all, or readable subrange(s)
2008    that are neither at the beginning nor at the end -- nothing is returned.
2009    The purpose of this function is to handle a read across a boundary
2010    of accessible memory in a case when memory map is not available.
2011    The above restrictions are fine for this case, but will give
2012    incorrect results if the memory is 'patchy'.  However, supporting
2013    'patchy' memory would require trying to read every single byte,
2014    and that seems an unacceptable solution.  An explicit memory map is
2015    recommended for this case -- and read_memory_robust will
2016    take care of reading multiple ranges then.  */
2017 
2018 static void
2019 read_whatever_is_readable (struct target_ops *ops,
2020 			   const ULONGEST begin, const ULONGEST end,
2021 			   int unit_size,
2022 			   std::vector<memory_read_result> *result)
2023 {
2024   ULONGEST current_begin = begin;
2025   ULONGEST current_end = end;
2026   int forward;
2027   ULONGEST xfered_len;
2028 
2029   /* If we previously failed to read 1 byte, nothing can be done here.  */
2030   if (end - begin <= 1)
2031     return;
2032 
2033   gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2034 
2035   /* Check that either the first or the last byte is readable, and give up
2036      if not.  This heuristic is meant to permit reading accessible memory
2037      at the boundary of an accessible region.  */
2038   if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2039 			   buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2040     {
2041       forward = 1;
2042       ++current_begin;
2043     }
2044   else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2045 				buf.get () + (end - begin) - 1, end - 1, 1,
2046 				&xfered_len) == TARGET_XFER_OK)
2047     {
2048       forward = 0;
2049       --current_end;
2050     }
2051   else
2052     return;
2053 
2054   /* Loop invariant is that the [current_begin, current_end) range was
2055      previously found to be not readable as a whole.
2056 
2057      Note loop condition -- if the range has 1 byte, we can't divide the range
2058      so there's no point trying further.  */
2059   while (current_end - current_begin > 1)
2060     {
2061       ULONGEST first_half_begin, first_half_end;
2062       ULONGEST second_half_begin, second_half_end;
2063       LONGEST xfer;
2064       ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2065 
2066       if (forward)
2067 	{
2068 	  first_half_begin = current_begin;
2069 	  first_half_end = middle;
2070 	  second_half_begin = middle;
2071 	  second_half_end = current_end;
2072 	}
2073       else
2074 	{
2075 	  first_half_begin = middle;
2076 	  first_half_end = current_end;
2077 	  second_half_begin = current_begin;
2078 	  second_half_end = middle;
2079 	}
2080 
2081       xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2082 			  buf.get () + (first_half_begin - begin) * unit_size,
2083 			  first_half_begin,
2084 			  first_half_end - first_half_begin);
2085 
2086       if (xfer == first_half_end - first_half_begin)
2087 	{
2088 	  /* This half reads up fine.  So, the error must be in the
2089 	     other half.  */
2090 	  current_begin = second_half_begin;
2091 	  current_end = second_half_end;
2092 	}
2093       else
2094 	{
2095 	  /* This half is not readable.  Because we've tried one byte, we
2096 	     know some part of this half is actually readable.  Go to the next
2097 	     iteration to divide again and try to read.
2098 
2099 	     We don't handle the other half, because this function only tries
2100 	     to read a single readable subrange.  */
2101 	  current_begin = first_half_begin;
2102 	  current_end = first_half_end;
2103 	}
2104     }
2105 
2106   if (forward)
2107     {
2108       /* The [begin, current_begin) range has been read.  */
2109       result->emplace_back (begin, current_begin, std::move (buf));
2110     }
2111   else
2112     {
2113       /* The [current_end, end) range has been read.  */
2114       LONGEST region_len = end - current_end;
2115 
2116       gdb::unique_xmalloc_ptr<gdb_byte> data
2117 	((gdb_byte *) xmalloc (region_len * unit_size));
2118       memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2119 	      region_len * unit_size);
2120       result->emplace_back (current_end, end, std::move (data));
2121     }
2122 }
2123 
2124 std::vector<memory_read_result>
2125 read_memory_robust (struct target_ops *ops,
2126 		    const ULONGEST offset, const LONGEST len)
2127 {
2128   std::vector<memory_read_result> result;
2129   int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2130 
2131   LONGEST xfered_total = 0;
2132   while (xfered_total < len)
2133     {
2134       struct mem_region *region = lookup_mem_region (offset + xfered_total);
2135       LONGEST region_len;
2136 
2137       /* If there is no explicit region, a fake one should be created.  */
2138       gdb_assert (region);
2139 
2140       if (region->hi == 0)
2141 	region_len = len - xfered_total;
2142       else
2143 	region_len = region->hi - offset;
2144 
2145       if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2146 	{
2147 	  /* Cannot read this region.  Note that we can end up here only
2148 	     if the region is explicitly marked inaccessible, or
2149 	     'inaccessible-by-default' is in effect.  */
2150 	  xfered_total += region_len;
2151 	}
2152       else
2153 	{
2154 	  LONGEST to_read = std::min (len - xfered_total, region_len);
2155 	  gdb::unique_xmalloc_ptr<gdb_byte> buffer
2156 	    ((gdb_byte *) xmalloc (to_read * unit_size));
2157 
2158 	  LONGEST xfered_partial =
2159 	      target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2160 			   offset + xfered_total, to_read);
2161 	  /* Call an observer, notifying them of the xfer progress?  */
2162 	  if (xfered_partial <= 0)
2163 	    {
2164 	      /* Got an error reading full chunk.  See if maybe we can read
2165 		 some subrange.  */
2166 	      read_whatever_is_readable (ops, offset + xfered_total,
2167 					 offset + xfered_total + to_read,
2168 					 unit_size, &result);
2169 	      xfered_total += to_read;
2170 	    }
2171 	  else
2172 	    {
2173 	      result.emplace_back (offset + xfered_total,
2174 				   offset + xfered_total + xfered_partial,
2175 				   std::move (buffer));
2176 	      xfered_total += xfered_partial;
2177 	    }
2178 	  QUIT;
2179 	}
2180     }
2181 
2182   return result;
2183 }
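
/* Illustrative sketch of a hypothetical caller (process_chunk is a
   placeholder, not a real function).  Each memory_read_result describes
   one chunk that could actually be read -- mrr.data holds
   (mrr.end - mrr.begin) addressable units starting at target address
   mrr.begin -- so any gaps between chunks must be handled explicitly:

     for (const memory_read_result &mrr
	    : read_memory_robust (ops, addr, len))
       process_chunk (mrr.begin, mrr.end, mrr.data.get ());
*/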
2184 
2185 
2186 /* An alternative to target_write with progress callbacks.  */
2187 
2188 LONGEST
2189 target_write_with_progress (struct target_ops *ops,
2190 			    enum target_object object,
2191 			    const char *annex, const gdb_byte *buf,
2192 			    ULONGEST offset, LONGEST len,
2193 			    void (*progress) (ULONGEST, void *), void *baton)
2194 {
2195   LONGEST xfered_total = 0;
2196   int unit_size = 1;
2197 
2198   /* If we are writing to a memory object, find the length of an addressable
2199      unit for that architecture.  */
2200   if (object == TARGET_OBJECT_MEMORY
2201       || object == TARGET_OBJECT_STACK_MEMORY
2202       || object == TARGET_OBJECT_CODE_MEMORY
2203       || object == TARGET_OBJECT_RAW_MEMORY)
2204     unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2205 
2206   /* Give the progress callback a chance to set up.  */
2207   if (progress)
2208     (*progress) (0, baton);
2209 
2210   while (xfered_total < len)
2211     {
2212       ULONGEST xfered_partial;
2213       enum target_xfer_status status;
2214 
2215       status = target_write_partial (ops, object, annex,
2216 				     buf + xfered_total * unit_size,
2217 				     offset + xfered_total, len - xfered_total,
2218 				     &xfered_partial);
2219 
2220       if (status != TARGET_XFER_OK)
2221 	return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2222 
2223       if (progress)
2224 	(*progress) (xfered_partial, baton);
2225 
2226       xfered_total += xfered_partial;
2227       QUIT;
2228     }
2229   return len;
2230 }
2231 
2232 /* For docs on target_write see target.h.  */
2233 
2234 LONGEST
2235 target_write (struct target_ops *ops,
2236 	      enum target_object object,
2237 	      const char *annex, const gdb_byte *buf,
2238 	      ULONGEST offset, LONGEST len)
2239 {
2240   return target_write_with_progress (ops, object, annex, buf, offset, len,
2241 				     NULL, NULL);
2242 }
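
/* Illustrative sketch of the progress interface (note_progress and its
   baton are hypothetical names): target_write_with_progress invokes the
   callback once with 0 before the transfer starts, and then once per
   successful partial write with the number of units just transferred:

     static void
     note_progress (ULONGEST units, void *baton)
     {
       ...update a progress meter by UNITS...
     }

     target_write_with_progress (current_inferior ()->top_target (),
				 TARGET_OBJECT_MEMORY, NULL, buf, memaddr,
				 len, note_progress, NULL);
*/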
2243 
2244 /* Help for target_read_alloc and target_read_stralloc.  See their comments
2245    for details.  */
2246 
2247 template <typename T>
2248 gdb::optional<gdb::def_vector<T>>
2249 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2250 		     const char *annex)
2251 {
2252   gdb::def_vector<T> buf;
2253   size_t buf_pos = 0;
2254   const int chunk = 4096;
2255 
2256   /* This function does not have a length parameter; it reads the
2257      entire OBJECT.  Also, it doesn't support objects fetched partly
2258      from one target and partly from another (in a different stratum,
2259      e.g. a core file and an executable).  Both reasons make it
2260      unsuitable for reading memory.  */
2261   gdb_assert (object != TARGET_OBJECT_MEMORY);
2262 
2263   /* Start by reading up to 4K at a time.  The target will throttle
2264      this number down if necessary.  */
2265   while (1)
2266     {
2267       ULONGEST xfered_len;
2268       enum target_xfer_status status;
2269 
2270       buf.resize (buf_pos + chunk);
2271 
2272       status = target_read_partial (ops, object, annex,
2273 				    (gdb_byte *) &buf[buf_pos],
2274 				    buf_pos, chunk,
2275 				    &xfered_len);
2276 
2277       if (status == TARGET_XFER_EOF)
2278 	{
2279 	  /* Read all there was.  */
2280 	  buf.resize (buf_pos);
2281 	  return buf;
2282 	}
2283       else if (status != TARGET_XFER_OK)
2284 	{
2285 	  /* An error occurred.  */
2286 	  return {};
2287 	}
2288 
2289       buf_pos += xfered_len;
2290 
2291       QUIT;
2292     }
2293 }
2294 
2295 /* See target.h.  */
2296 
2297 gdb::optional<gdb::byte_vector>
2298 target_read_alloc (struct target_ops *ops, enum target_object object,
2299 		   const char *annex)
2300 {
2301   return target_read_alloc_1<gdb_byte> (ops, object, annex);
2302 }
2303 
2304 /* See target.h.  */
2305 
2306 gdb::optional<gdb::char_vector>
2307 target_read_stralloc (struct target_ops *ops, enum target_object object,
2308 		      const char *annex)
2309 {
2310   gdb::optional<gdb::char_vector> buf
2311     = target_read_alloc_1<char> (ops, object, annex);
2312 
2313   if (!buf)
2314     return {};
2315 
2316   if (buf->empty () || buf->back () != '\0')
2317     buf->push_back ('\0');
2318 
2319   /* Check for embedded NUL bytes; but allow trailing NULs.  */
2320   for (auto it = std::find (buf->begin (), buf->end (), '\0');
2321        it != buf->end (); it++)
2322     if (*it != '\0')
2323       {
2324 	warning (_("target object %d, annex %s, "
2325 		   "contained unexpected null characters"),
2326 		 (int) object, annex ? annex : "(none)");
2327 	break;
2328       }
2329 
2330   return buf;
2331 }
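
/* Illustrative sketch (the "processes" annex is just an example of
   what an OSDATA-capable target might accept): callers test the
   optional before using the NUL-terminated contents:

     gdb::optional<gdb::char_vector> text
       = target_read_stralloc (ops, TARGET_OBJECT_OSDATA, "processes");

     if (text)
       gdb_printf ("%s", text->data ());
*/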
2332 
2333 /* Memory transfer methods.  */
2334 
2335 void
2336 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2337 		   LONGEST len)
2338 {
2339   /* This method is used to read from an alternate, non-current
2340      target.  This read must bypass the overlay support (as symbols
2341      don't match this target), and GDB's internal cache (wrong cache
2342      for this target).  */
2343   if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2344       != len)
2345     memory_error (TARGET_XFER_E_IO, addr);
2346 }
2347 
2348 ULONGEST
2349 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2350 			    int len, enum bfd_endian byte_order)
2351 {
2352   gdb_byte buf[sizeof (ULONGEST)];
2353 
2354   gdb_assert (len <= sizeof (buf));
2355   get_target_memory (ops, addr, buf, len);
2356   return extract_unsigned_integer (buf, len, byte_order);
2357 }
2358 
2359 /* See target.h.  */
2360 
2361 int
2362 target_insert_breakpoint (struct gdbarch *gdbarch,
2363 			  struct bp_target_info *bp_tgt)
2364 {
2365   if (!may_insert_breakpoints)
2366     {
2367       warning (_("May not insert breakpoints"));
2368       return 1;
2369     }
2370 
2371   target_ops *target = current_inferior ()->top_target ();
2372 
2373   return target->insert_breakpoint (gdbarch, bp_tgt);
2374 }
2375 
2376 /* See target.h.  */
2377 
2378 int
2379 target_remove_breakpoint (struct gdbarch *gdbarch,
2380 			  struct bp_target_info *bp_tgt,
2381 			  enum remove_bp_reason reason)
2382 {
2383   /* This is kind of a weird case to handle, but the permission might
2384      have been changed after breakpoints were inserted - in which case
2385      we should just take the user literally and assume that any
2386      breakpoints should be left in place.  */
2387   if (!may_insert_breakpoints)
2388     {
2389       warning (_("May not remove breakpoints"));
2390       return 1;
2391     }
2392 
2393   target_ops *target = current_inferior ()->top_target ();
2394 
2395   return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2396 }
2397 
2398 static void
2399 info_target_command (const char *args, int from_tty)
2400 {
2401   int has_all_mem = 0;
2402 
2403   if (current_program_space->symfile_object_file != NULL)
2404     {
2405       objfile *objf = current_program_space->symfile_object_file;
2406       gdb_printf (_("Symbols from \"%s\".\n"),
2407 		  objfile_name (objf));
2408     }
2409 
2410   for (target_ops *t = current_inferior ()->top_target ();
2411        t != NULL;
2412        t = t->beneath ())
2413     {
2414       if (!t->has_memory ())
2415 	continue;
2416 
2417       if ((int) (t->stratum ()) <= (int) dummy_stratum)
2418 	continue;
2419       if (has_all_mem)
2420 	gdb_printf (_("\tWhile running this, "
2421 		      "GDB does not access memory from...\n"));
2422       gdb_printf ("%s:\n", t->longname ());
2423       t->files_info ();
2424       has_all_mem = t->has_all_memory ();
2425     }
2426 }
2427 
2428 /* This function is called before any new inferior is created, e.g.
2429    by running a program, attaching, or connecting to a target.
2430    It cleans up any state from previous invocations which might
2431    change between runs.  This is a subset of what target_preopen
2432    resets (things which might change between targets).  */
2433 
2434 void
2435 target_pre_inferior (int from_tty)
2436 {
2437   /* Clear out solib state.  Otherwise the solib state of the previous
2438      inferior might have survived and is entirely wrong for the new
2439      target.  This has been observed on GNU/Linux using glibc 2.3.  How
2440      to reproduce:
2441 
2442      bash$ ./foo&
2443      [1] 4711
2444      bash$ ./foo&
2445      [2] 4712
2446      bash$ gdb ./foo
2447      [...]
2448      (gdb) attach 4711
2449      (gdb) detach
2450      (gdb) attach 4712
2451      Cannot access memory at address 0xdeadbeef
2452   */
2453 
2454   /* In some OSs, the shared library list is the same/global/shared
2455      across inferiors.  If code is shared between processes, so are
2456      memory regions and features.  */
2457   if (!gdbarch_has_global_solist (target_gdbarch ()))
2458     {
2459       no_shared_libraries (NULL, from_tty);
2460 
2461       invalidate_target_mem_regions ();
2462 
2463       target_clear_description ();
2464     }
2465 
2466   /* attach_flag may be set if the previous process associated with
2467      the inferior was attached to.  */
2468   current_inferior ()->attach_flag = false;
2469 
2470   current_inferior ()->highest_thread_num = 0;
2471 
2472   agent_capability_invalidate ();
2473 }
2474 
2475 /* This is to be called by the open routine before it does
2476    anything.  */
2477 
2478 void
2479 target_preopen (int from_tty)
2480 {
2481   dont_repeat ();
2482 
2483   if (current_inferior ()->pid != 0)
2484     {
2485       if (!from_tty
2486 	  || !target_has_execution ()
2487 	  || query (_("A program is being debugged already.  Kill it? ")))
2488 	{
2489 	  /* Core inferiors actually should be detached, not
2490 	     killed.  */
2491 	  if (target_has_execution ())
2492 	    target_kill ();
2493 	  else
2494 	    target_detach (current_inferior (), 0);
2495 	}
2496       else
2497 	error (_("Program not killed."));
2498     }
2499 
2500   /* Calling target_kill may remove the target from the stack.  But if
2501      it doesn't (which seems like a win for UDI), remove it now.  */
2502   /* Leave the exec target, though.  The user may be switching from a
2503      live process to a core of the same program.  */
2504   current_inferior ()->pop_all_targets_above (file_stratum);
2505 
2506   target_pre_inferior (from_tty);
2507 }
2508 
2509 /* See target.h.  */
2510 
2511 void
2512 target_detach (inferior *inf, int from_tty)
2513 {
2514   /* Threads don't need to be resumed until the end of this function.  */
2515   scoped_disable_commit_resumed disable_commit_resumed ("detaching");
2516 
2517   /* After we have detached, we will clear the register cache for this inferior
2518      by calling registers_changed_ptid.  We must save the pid_ptid before
2519      detaching, as the target detach method will clear inf->pid.  */
2520   ptid_t save_pid_ptid = ptid_t (inf->pid);
2521 
2522   /* As long as some to_detach implementations rely on the current_inferior
2523      (either directly, or indirectly, like through target_gdbarch or by
2524      reading memory), INF needs to be the current inferior.  When that
2525      requirement no longer holds, this assertion can be removed.  */
2527   gdb_assert (inf == current_inferior ());
2528 
2529   prepare_for_detach ();
2530 
2531   /* Hold a strong reference because detaching may unpush the
2532      target.  */
2533   auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2534 
2535   current_inferior ()->top_target ()->detach (inf, from_tty);
2536 
2537   process_stratum_target *proc_target
2538     = as_process_stratum_target (proc_target_ref.get ());
2539 
2540   registers_changed_ptid (proc_target, save_pid_ptid);
2541 
2542   /* We have to ensure we have no frame cache left.  Normally,
2543      registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2544      inferior_ptid matches save_pid_ptid, but in our case, it does not
2545      call it, as inferior_ptid has been reset.  */
2546   reinit_frame_cache ();
2547 
2548   disable_commit_resumed.reset_and_commit ();
2549 }
2550 
2551 void
2552 target_disconnect (const char *args, int from_tty)
2553 {
2554   /* If we're in breakpoints-always-inserted mode or if breakpoints
2555      are global across processes, we have to remove them before
2556      disconnecting.  */
2557   remove_breakpoints ();
2558 
2559   current_inferior ()->top_target ()->disconnect (args, from_tty);
2560 }
2561 
2562 /* See target/target.h.  */
2563 
2564 ptid_t
2565 target_wait (ptid_t ptid, struct target_waitstatus *status,
2566 	     target_wait_flags options)
2567 {
2568   target_ops *target = current_inferior ()->top_target ();
2569   process_stratum_target *proc_target = current_inferior ()->process_target ();
2570 
2571   gdb_assert (!proc_target->commit_resumed_state);
2572 
2573   if (!target_can_async_p (target))
2574     gdb_assert ((options & TARGET_WNOHANG) == 0);
2575 
2576   try
2577     {
2578       gdb::observers::target_pre_wait.notify (ptid);
2579       ptid_t event_ptid = target->wait (ptid, status, options);
2580       gdb::observers::target_post_wait.notify (event_ptid);
2581       return event_ptid;
2582     }
2583   catch (...)
2584     {
2585       gdb::observers::target_post_wait.notify (null_ptid);
2586       throw;
2587     }
2588 }
2589 
2590 /* See target.h.  */
2591 
2592 ptid_t
2593 default_target_wait (struct target_ops *ops,
2594 		     ptid_t ptid, struct target_waitstatus *status,
2595 		     target_wait_flags options)
2596 {
2597   status->set_ignore ();
2598   return minus_one_ptid;
2599 }
2600 
2601 std::string
2602 target_pid_to_str (ptid_t ptid)
2603 {
2604   return current_inferior ()->top_target ()->pid_to_str (ptid);
2605 }
2606 
2607 const char *
2608 target_thread_name (struct thread_info *info)
2609 {
2610   gdb_assert (info->inf == current_inferior ());
2611 
2612   return current_inferior ()->top_target ()->thread_name (info);
2613 }
2614 
2615 struct thread_info *
2616 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2617 				     int handle_len,
2618 				     struct inferior *inf)
2619 {
2620   target_ops *target = current_inferior ()->top_target ();
2621 
2622   return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2623 }
2624 
2625 /* See target.h.  */
2626 
2627 gdb::byte_vector
2628 target_thread_info_to_thread_handle (struct thread_info *tip)
2629 {
2630   target_ops *target = current_inferior ()->top_target ();
2631 
2632   return target->thread_info_to_thread_handle (tip);
2633 }
2634 
2635 void
2636 target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
2637 {
2638   process_stratum_target *curr_target = current_inferior ()->process_target ();
2639   gdb_assert (!curr_target->commit_resumed_state);
2640 
2641   gdb_assert (inferior_ptid != null_ptid);
2642   gdb_assert (inferior_ptid.matches (scope_ptid));
2643 
2644   target_dcache_invalidate ();
2645 
2646   current_inferior ()->top_target ()->resume (scope_ptid, step, signal);
2647 
2648   registers_changed_ptid (curr_target, scope_ptid);
2649   /* We only set the internal executing state here.  The user/frontend
2650      running state is set at a higher level.  This also clears the
2651      thread's stop_pc as side effect.  */
2652   set_executing (curr_target, scope_ptid, true);
2653   clear_inline_frame_state (curr_target, scope_ptid);
2654 
2655   if (target_can_async_p ())
2656     target_async (true);
2657 }
2658 
2659 /* See target.h.  */
2660 
2661 void
2662 target_commit_resumed ()
2663 {
2664   gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2665   current_inferior ()->top_target ()->commit_resumed ();
2666 }
2667 
2668 /* See target.h.  */
2669 
2670 bool
2671 target_has_pending_events ()
2672 {
2673   return current_inferior ()->top_target ()->has_pending_events ();
2674 }
2675 
2676 void
2677 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2678 {
2679   current_inferior ()->top_target ()->pass_signals (pass_signals);
2680 }
2681 
2682 void
2683 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2684 {
2685   current_inferior ()->top_target ()->program_signals (program_signals);
2686 }
2687 
2688 static void
2689 default_follow_fork (struct target_ops *self, inferior *child_inf,
2690 		     ptid_t child_ptid, target_waitkind fork_kind,
2691 		     bool follow_child, bool detach_fork)
2692 {
2693   /* Some target returned a fork event, but did not know how to follow it.  */
2694   internal_error (_("could not find a target to follow fork"));
2695 }
2696 
2697 /* See target.h.  */
2698 
2699 void
2700 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2701 		    target_waitkind fork_kind, bool follow_child,
2702 		    bool detach_fork)
2703 {
2704   target_ops *target = current_inferior ()->top_target ();
2705 
2706   /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2707      DETACH_FORK.  */
2708   if (child_inf != nullptr)
2709     {
2710       gdb_assert (follow_child || !detach_fork);
2711       gdb_assert (child_inf->pid == child_ptid.pid ());
2712     }
2713   else
2714     gdb_assert (!follow_child && detach_fork);
2715 
2716   return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2717 			      detach_fork);
2718 }
2719 
2720 /* See target.h.  */
2721 
2722 void
2723 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2724 		    const char *execd_pathname)
2725 {
2726   current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2727 						   execd_pathname);
2728 }
2729 
2730 static void
2731 default_mourn_inferior (struct target_ops *self)
2732 {
2733   internal_error (_("could not find a target to mourn the inferior"));
2734 }
2735 
2736 void
2737 target_mourn_inferior (ptid_t ptid)
2738 {
2739   gdb_assert (ptid.pid () == inferior_ptid.pid ());
2740   current_inferior ()->top_target ()->mourn_inferior ();
2741 
2742   /* We no longer need to keep handles on any of the object files.
2743      Make sure to release them to avoid unnecessarily locking any
2744      of them while we're not actually debugging.  */
2745   bfd_cache_close_all ();
2746 }
2747 
2748 /* Look for a target which can describe architectural features, starting
2749    from TARGET.  If we find one, return its description.  */
2750 
2751 const struct target_desc *
2752 target_read_description (struct target_ops *target)
2753 {
2754   return target->read_description ();
2755 }
2756 
2757 
2758 /* Default implementation of memory-searching.  */
2759 
2760 static int
2761 default_search_memory (struct target_ops *self,
2762 		       CORE_ADDR start_addr, ULONGEST search_space_len,
2763 		       const gdb_byte *pattern, ULONGEST pattern_len,
2764 		       CORE_ADDR *found_addrp)
2765 {
2766   auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2767     {
2768       return target_read (current_inferior ()->top_target (),
2769 			  TARGET_OBJECT_MEMORY, NULL,
2770 			  result, addr, len) == len;
2771     };
2772 
2773   /* Start over from the top of the target stack.  */
2774   return simple_search_memory (read_memory, start_addr, search_space_len,
2775 			       pattern, pattern_len, found_addrp);
2776 }
2777 
2778 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2779    sequence of bytes in PATTERN with length PATTERN_LEN.
2780 
2781    The result is 1 if found, 0 if not found, and -1 if there was an error
2782    requiring halting of the search (e.g. memory read error).
2783    If the pattern is found the address is recorded in FOUND_ADDRP.  */
2784 
2785 int
2786 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2787 		      const gdb_byte *pattern, ULONGEST pattern_len,
2788 		      CORE_ADDR *found_addrp)
2789 {
2790   target_ops *target = current_inferior ()->top_target ();
2791 
2792   return target->search_memory (start_addr, search_space_len, pattern,
2793 				pattern_len, found_addrp);
2794 }
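
/* Illustrative sketch (the addresses and pattern are made up):
   searching a 4 KiB window for a 4-byte pattern and acting on the
   tri-state result described above:

     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;
     int res = target_search_memory (start, 0x1000, pattern,
				     sizeof (pattern), &found);

     if (res < 0)
       error (_("memory read error during search"));
     else if (res == 1)
       gdb_printf ("found at %s\n", paddress (target_gdbarch (), found));
*/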
2795 
2796 /* Look through the currently pushed targets.  If none of them will
2797    be able to restart the currently running process, issue an error
2798    message.  */
2799 
2800 void
2801 target_require_runnable (void)
2802 {
2803   for (target_ops *t = current_inferior ()->top_target ();
2804        t != NULL;
2805        t = t->beneath ())
2806     {
2807       /* If this target knows how to create a new program, then
2808 	 assume we will still be able to after killing the current
2809 	 one.  Either killing and mourning will not pop T, or else
2810 	 find_default_run_target will find it again.  */
2811       if (t->can_create_inferior ())
2812 	return;
2813 
2814       /* Do not worry about targets at certain strata that can not
2815 	 create inferiors.  Assume they will be pushed again if
2816 	 necessary, and continue to the process_stratum.  */
2817       if (t->stratum () > process_stratum)
2818 	continue;
2819 
2820       error (_("The \"%s\" target does not support \"run\".  "
2821 	       "Try \"help target\" or \"continue\"."),
2822 	     t->shortname ());
2823     }
2824 
2825   /* This function is only called if the target is running.  In that
2826      case there should have been a process_stratum target and it
2827      should either know how to create inferiors, or not...  */
2828   internal_error (_("No targets found"));
2829 }
2830 
2831 /* Whether GDB is allowed to fall back to the default run target for
2832    "run", "attach", etc. when no target is connected yet.  */
2833 static bool auto_connect_native_target = true;
2834 
2835 static void
2836 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2837 				 struct cmd_list_element *c, const char *value)
2838 {
2839   gdb_printf (file,
2840 	      _("Whether GDB may automatically connect to the "
2841 		"native target is %s.\n"),
2842 	      value);
2843 }
2844 
2845 /* A pointer to the target that can respond to "run" or "attach".
2846    Native targets are always singletons and instantiated early at GDB
2847    startup.  */
2848 static target_ops *the_native_target;
2849 
2850 /* See target.h.  */
2851 
2852 void
2853 set_native_target (target_ops *target)
2854 {
2855   if (the_native_target != NULL)
2856     internal_error (_("native target already set (\"%s\")."),
2857 		    the_native_target->longname ());
2858 
2859   the_native_target = target;
2860 }
2861 
2862 /* See target.h.  */
2863 
2864 target_ops *
2865 get_native_target ()
2866 {
2867   return the_native_target;
2868 }
2869 
2870 /* Look through the list of possible targets for a target that can
2871    execute a run or attach command without any other data.  This is
2872    used to locate the default process stratum.
2873 
2874    If DO_MESG is not NULL, the result is always valid (error() is
2875    called for errors); else, return NULL on error.  */
2876 
2877 static struct target_ops *
2878 find_default_run_target (const char *do_mesg)
2879 {
2880   if (auto_connect_native_target && the_native_target != NULL)
2881     return the_native_target;
2882 
2883   if (do_mesg != NULL)
2884     error (_("Don't know how to %s.  Try \"help target\"."), do_mesg);
2885   return NULL;
2886 }
2887 
2888 /* See target.h.  */
2889 
2890 struct target_ops *
2891 find_attach_target (void)
2892 {
2893   /* If a target on the current stack can attach, use it.  */
2894   for (target_ops *t = current_inferior ()->top_target ();
2895        t != NULL;
2896        t = t->beneath ())
2897     {
2898       if (t->can_attach ())
2899 	return t;
2900     }
2901 
2902   /* Otherwise, use the default run target for attaching.  */
2903   return find_default_run_target ("attach");
2904 }
2905 
2906 /* See target.h.  */
2907 
2908 struct target_ops *
2909 find_run_target (void)
2910 {
2911   /* If a target on the current stack can run, use it.  */
2912   for (target_ops *t = current_inferior ()->top_target ();
2913        t != NULL;
2914        t = t->beneath ())
2915     {
2916       if (t->can_create_inferior ())
2917 	return t;
2918     }
2919 
2920   /* Otherwise, use the default run target.  */
2921   return find_default_run_target ("run");
2922 }
2923 
2924 bool
2925 target_ops::info_proc (const char *args, enum info_proc_what what)
2926 {
2927   return false;
2928 }
2929 
2930 /* Implement the "info proc" command.  */
2931 
2932 int
2933 target_info_proc (const char *args, enum info_proc_what what)
2934 {
2935   struct target_ops *t;
2936 
2937   /* If we're already connected to something that can get us OS
2938      related data, use it.  Otherwise, try using the native
2939      target.  */
2940   t = find_target_at (process_stratum);
2941   if (t == NULL)
2942     t = find_default_run_target (NULL);
2943 
2944   for (; t != NULL; t = t->beneath ())
2945     {
2946       if (t->info_proc (args, what))
2947 	{
2948 	  if (targetdebug)
2949 	    gdb_printf (gdb_stdlog,
2950 			"target_info_proc (\"%s\", %d)\n", args, what);
2951 
2952 	  return 1;
2953 	}
2954     }
2955 
2956   return 0;
2957 }
2958 
2959 static int
2960 find_default_supports_disable_randomization (struct target_ops *self)
2961 {
2962   struct target_ops *t;
2963 
2964   t = find_default_run_target (NULL);
2965   if (t != NULL)
2966     return t->supports_disable_randomization ();
2967   return 0;
2968 }
2969 
2970 int
2971 target_supports_disable_randomization (void)
2972 {
2973   return current_inferior ()->top_target ()->supports_disable_randomization ();
2974 }
2975 
2976 /* See target/target.h.  */
2977 
2978 int
2979 target_supports_multi_process (void)
2980 {
2981   return current_inferior ()->top_target ()->supports_multi_process ();
2982 }
2983 
2984 /* See target.h.  */
2985 
2986 gdb::optional<gdb::char_vector>
2987 target_get_osdata (const char *type)
2988 {
2989   struct target_ops *t;
2990 
2991   /* If we're already connected to something that can get us OS
2992      related data, use it.  Otherwise, try using the native
2993      target.  */
2994   t = find_target_at (process_stratum);
2995   if (t == NULL)
2996     t = find_default_run_target ("get OS data");
2997 
2998   if (!t)
2999     return {};
3000 
3001   return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3002 }
3003 
3004 /* Determine the current address space of thread PTID.  */
3005 
3006 struct address_space *
3007 target_thread_address_space (ptid_t ptid)
3008 {
3009   struct address_space *aspace;
3010 
3011   aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3012   gdb_assert (aspace != NULL);
3013 
3014   return aspace;
3015 }
3016 
3017 /* See target.h.  */
3018 
3019 target_ops *
3020 target_ops::beneath () const
3021 {
3022   return current_inferior ()->find_target_beneath (this);
3023 }
3024 
3025 void
3026 target_ops::close ()
3027 {
3028 }
3029 
3030 bool
3031 target_ops::can_attach ()
3032 {
3033   return false;
3034 }
3035 
3036 void
3037 target_ops::attach (const char *, int)
3038 {
3039   gdb_assert_not_reached ("target_ops::attach called");
3040 }
3041 
3042 bool
3043 target_ops::can_create_inferior ()
3044 {
3045   return false;
3046 }
3047 
3048 void
3049 target_ops::create_inferior (const char *, const std::string &,
3050 			     char **, int)
3051 {
3052   gdb_assert_not_reached ("target_ops::create_inferior called");
3053 }
3054 
3055 bool
3056 target_ops::can_run ()
3057 {
3058   return false;
3059 }
3060 
3061 int
3062 target_can_run ()
3063 {
3064   for (target_ops *t = current_inferior ()->top_target ();
3065        t != NULL;
3066        t = t->beneath ())
3067     {
3068       if (t->can_run ())
3069 	return 1;
3070     }
3071 
3072   return 0;
3073 }
3074 
3075 /* Target file operations.  */
3076 
3077 static struct target_ops *
3078 default_fileio_target (void)
3079 {
3080   struct target_ops *t;
3081 
3082   /* If we're already connected to something that can perform
3083      file I/O, use it. Otherwise, try using the native target.  */
3084   t = find_target_at (process_stratum);
3085   if (t != NULL)
3086     return t;
3087   return find_default_run_target ("file I/O");
3088 }
3089 
3090 /* File handle for target file operations.  */
3091 
3092 struct fileio_fh_t
3093 {
3094   /* The target on which this file is open.  NULL if the target has
3095      since been closed while the handle was still open.  */
3096   target_ops *target;
3097 
3098   /* The file descriptor on the target.  */
3099   int target_fd;
3100 
3101   /* Check whether this fileio_fh_t represents a closed file.  */
3102   bool is_closed ()
3103   {
3104     return target_fd < 0;
3105   }
3106 };
3107 
3108 /* Vector of currently open file handles.  The value returned by
3109    target_fileio_open and passed as the FD argument to other
3110    target_fileio_* functions is an index into this vector.  This
3111    vector's entries are never freed; instead, files are marked as
3112    closed, and the handle becomes available for reuse.  */
3113 static std::vector<fileio_fh_t> fileio_fhandles;
3114 
3115 /* Index into fileio_fhandles of the lowest handle that might be
3116    closed.  This permits handle reuse without searching the whole
3117    list each time a new file is opened.  */
3118 static int lowest_closed_fd;
3119 
3120 /* See target.h.  */
3121 
3122 void
3123 fileio_handles_invalidate_target (target_ops *targ)
3124 {
3125   for (fileio_fh_t &fh : fileio_fhandles)
3126     if (fh.target == targ)
3127       fh.target = NULL;
3128 }
3129 
3130 /* Acquire a target fileio file descriptor.  */
3131 
3132 static int
3133 acquire_fileio_fd (target_ops *target, int target_fd)
3134 {
3135   /* Search for closed handles to reuse.  */
3136   for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3137     {
3138       fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3139 
3140       if (fh.is_closed ())
3141 	break;
3142     }
3143 
3144   /* Push a new handle if no closed handles were found.  */
3145   if (lowest_closed_fd == fileio_fhandles.size ())
3146     fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3147   else
3148     fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3149 
3150   /* Should no longer be marked closed.  */
3151   gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3152 
3153   /* Return its index, and start the next lookup at
3154      the next index.  */
3155   return lowest_closed_fd++;
3156 }
3157 
3158 /* Release a target fileio file descriptor.  */
3159 
3160 static void
3161 release_fileio_fd (int fd, fileio_fh_t *fh)
3162 {
3163   fh->target_fd = -1;
3164   lowest_closed_fd = std::min (lowest_closed_fd, fd);
3165 }
3166 
3167 /* Return a pointer to the fileio_fhandle_t corresponding to FD.  */
3168 
3169 static fileio_fh_t *
3170 fileio_fd_to_fh (int fd)
3171 {
3172   return &fileio_fhandles[fd];
3173 }
3174 
3175 
3176 /* Default implementations of file i/o methods.  We don't want these
3177    to delegate automatically, because we need to know which target
3178    supported the method, in order to call it directly from within
3179    pread/pwrite, etc.  */
3180 
3181 int
3182 target_ops::fileio_open (struct inferior *inf, const char *filename,
3183 			 int flags, int mode, int warn_if_slow,
3184 			 fileio_error *target_errno)
3185 {
3186   *target_errno = FILEIO_ENOSYS;
3187   return -1;
3188 }
3189 
3190 int
3191 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3192 			   ULONGEST offset, fileio_error *target_errno)
3193 {
3194   *target_errno = FILEIO_ENOSYS;
3195   return -1;
3196 }
3197 
3198 int
3199 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3200 			  ULONGEST offset, fileio_error *target_errno)
3201 {
3202   *target_errno = FILEIO_ENOSYS;
3203   return -1;
3204 }
3205 
3206 int
3207 target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3208 {
3209   *target_errno = FILEIO_ENOSYS;
3210   return -1;
3211 }
3212 
3213 int
3214 target_ops::fileio_close (int fd, fileio_error *target_errno)
3215 {
3216   *target_errno = FILEIO_ENOSYS;
3217   return -1;
3218 }
3219 
3220 int
3221 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3222 			   fileio_error *target_errno)
3223 {
3224   *target_errno = FILEIO_ENOSYS;
3225   return -1;
3226 }
3227 
3228 gdb::optional<std::string>
3229 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3230 			     fileio_error *target_errno)
3231 {
3232   *target_errno = FILEIO_ENOSYS;
3233   return {};
3234 }
3235 
3236 /* See target.h.  */
3237 
3238 int
3239 target_fileio_open (struct inferior *inf, const char *filename,
3240 		    int flags, int mode, bool warn_if_slow, fileio_error *target_errno)
3241 {
3242   for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3243     {
3244       int fd = t->fileio_open (inf, filename, flags, mode,
3245 			       warn_if_slow, target_errno);
3246 
3247       if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3248 	continue;
3249 
3250       if (fd < 0)
3251 	fd = -1;
3252       else
3253 	fd = acquire_fileio_fd (t, fd);
3254 
3255       if (targetdebug)
3256 	gdb_printf (gdb_stdlog,
3257 		    "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3258 		    " = %d (%d)\n",
3259 		    inf == NULL ? 0 : inf->num,
3260 		    filename, flags, mode,
3261 		    warn_if_slow, fd,
3262 		    fd != -1 ? 0 : *target_errno);
3263       return fd;
3264     }
3265 
3266   *target_errno = FILEIO_ENOSYS;
3267   return -1;
3268 }
3269 
3270 /* See target.h.  */
3271 
3272 int
3273 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3274 		      ULONGEST offset, fileio_error *target_errno)
3275 {
3276   fileio_fh_t *fh = fileio_fd_to_fh (fd);
3277   int ret = -1;
3278 
3279   if (fh->is_closed ())
3280     *target_errno = FILEIO_EBADF;
3281   else if (fh->target == NULL)
3282     *target_errno = FILEIO_EIO;
3283   else
3284     ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3285 				     len, offset, target_errno);
3286 
3287   if (targetdebug)
3288     gdb_printf (gdb_stdlog,
3289 		"target_fileio_pwrite (%d,...,%d,%s) "
3290 		"= %d (%d)\n",
3291 		fd, len, pulongest (offset),
3292 		ret, ret != -1 ? 0 : *target_errno);
3293   return ret;
3294 }
3295 
3296 /* See target.h.  */
3297 
3298 int
3299 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3300 		     ULONGEST offset, fileio_error *target_errno)
3301 {
3302   fileio_fh_t *fh = fileio_fd_to_fh (fd);
3303   int ret = -1;
3304 
3305   if (fh->is_closed ())
3306     *target_errno = FILEIO_EBADF;
3307   else if (fh->target == NULL)
3308     *target_errno = FILEIO_EIO;
3309   else
3310     ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3311 				    len, offset, target_errno);
3312 
3313   if (targetdebug)
3314     gdb_printf (gdb_stdlog,
3315 		"target_fileio_pread (%d,...,%d,%s) "
3316 		"= %d (%d)\n",
3317 		fd, len, pulongest (offset),
3318 		ret, ret != -1 ? 0 : *target_errno);
3319   return ret;
3320 }
3321 
3322 /* See target.h.  */
3323 
3324 int
3325 target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3326 {
3327   fileio_fh_t *fh = fileio_fd_to_fh (fd);
3328   int ret = -1;
3329 
3330   if (fh->is_closed ())
3331     *target_errno = FILEIO_EBADF;
3332   else if (fh->target == NULL)
3333     *target_errno = FILEIO_EIO;
3334   else
3335     ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3336 
3337   if (targetdebug)
3338     gdb_printf (gdb_stdlog,
3339 		"target_fileio_fstat (%d) = %d (%d)\n",
3340 		fd, ret, ret != -1 ? 0 : *target_errno);
3341   return ret;
3342 }
3343 
3344 /* See target.h.  */
3345 
3346 int
3347 target_fileio_close (int fd, fileio_error *target_errno)
3348 {
3349   fileio_fh_t *fh = fileio_fd_to_fh (fd);
3350   int ret = -1;
3351 
3352   if (fh->is_closed ())
3353     *target_errno = FILEIO_EBADF;
3354   else
3355     {
3356       if (fh->target != NULL)
3357 	ret = fh->target->fileio_close (fh->target_fd,
3358 					target_errno);
3359       else
3360 	ret = 0;
3361       release_fileio_fd (fd, fh);
3362     }
3363 
3364   if (targetdebug)
3365     gdb_printf (gdb_stdlog,
3366 		"target_fileio_close (%d) = %d (%d)\n",
3367 		fd, ret, ret != -1 ? 0 : *target_errno);
3368   return ret;
3369 }
3370 
3371 /* See target.h.  */
3372 
3373 int
3374 target_fileio_unlink (struct inferior *inf, const char *filename,
3375 		      fileio_error *target_errno)
3376 {
3377   for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3378     {
3379       int ret = t->fileio_unlink (inf, filename, target_errno);
3380 
3381       if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3382 	continue;
3383 
3384       if (targetdebug)
3385 	gdb_printf (gdb_stdlog,
3386 		    "target_fileio_unlink (%d,%s)"
3387 		    " = %d (%d)\n",
3388 		    inf == NULL ? 0 : inf->num, filename,
3389 		    ret, ret != -1 ? 0 : *target_errno);
3390       return ret;
3391     }
3392 
3393   *target_errno = FILEIO_ENOSYS;
3394   return -1;
3395 }
3396 
3397 /* See target.h.  */
3398 
3399 gdb::optional<std::string>
3400 target_fileio_readlink (struct inferior *inf, const char *filename,
3401 			fileio_error *target_errno)
3402 {
3403   for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3404     {
3405       gdb::optional<std::string> ret
3406 	= t->fileio_readlink (inf, filename, target_errno);
3407 
3408       if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3409 	continue;
3410 
3411       if (targetdebug)
3412 	gdb_printf (gdb_stdlog,
3413 		    "target_fileio_readlink (%d,%s)"
3414 		    " = %s (%d)\n",
3415 		    inf == NULL ? 0 : inf->num,
3416 		    filename, ret ? ret->c_str () : "(nil)",
3417 		    ret ? 0 : *target_errno);
3418       return ret;
3419     }
3420 
3421   *target_errno = FILEIO_ENOSYS;
3422   return {};
3423 }
3424 
3425 /* Like scoped_fd, but specific to target fileio.  */
3426 
3427 class scoped_target_fd
3428 {
3429 public:
3430   explicit scoped_target_fd (int fd) noexcept
3431     : m_fd (fd)
3432   {
3433   }
3434 
3435   ~scoped_target_fd ()
3436   {
3437     if (m_fd >= 0)
3438       {
3439 	fileio_error target_errno;
3440 
3441 	target_fileio_close (m_fd, &target_errno);
3442       }
3443   }
3444 
3445   DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3446 
3447   int get () const noexcept
3448   {
3449     return m_fd;
3450   }
3451 
3452 private:
3453   int m_fd;
3454 };
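
/* Illustrative sketch of how this class is meant to be used (it
   mirrors target_fileio_read_alloc_1 below): the target descriptor is
   closed automatically when the object goes out of scope, including on
   early returns and on exceptions:

     fileio_error target_errno;
     scoped_target_fd fd (target_fileio_open (inf, filename,
					      FILEIO_O_RDONLY, 0700,
					      false, &target_errno));
     if (fd.get () == -1)
       return -1;

     ...use target_fileio_pread (fd.get (), ...) as needed...
*/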
3455 
3456 /* Read target file FILENAME, in the filesystem as seen by INF.  If
3457    INF is NULL, use the filesystem seen by the debugger (GDB or, for
3458    remote targets, the remote stub).  Store the result in *BUF_P and
3459    return the size of the transferred data.  PADDING additional bytes
3460    are available in *BUF_P.  This is a helper function for
3461    target_fileio_read_alloc; see the declaration of that function for
3462    more information.  */
3463 
3464 static LONGEST
3465 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3466 			    gdb_byte **buf_p, int padding)
3467 {
3468   size_t buf_alloc, buf_pos;
3469   gdb_byte *buf;
3470   LONGEST n;
3471   fileio_error target_errno;
3472 
3473   scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3474 					   0700, false, &target_errno));
3475   if (fd.get () == -1)
3476     return -1;
3477 
3478   /* Start by reading up to 4K at a time.  The target will throttle
3479      this number down if necessary.  */
3480   buf_alloc = 4096;
3481   buf = (gdb_byte *) xmalloc (buf_alloc);
3482   buf_pos = 0;
3483   while (1)
3484     {
3485       n = target_fileio_pread (fd.get (), &buf[buf_pos],
3486 			       buf_alloc - buf_pos - padding, buf_pos,
3487 			       &target_errno);
3488       if (n < 0)
3489 	{
3490 	  /* An error occurred.  */
3491 	  xfree (buf);
3492 	  return -1;
3493 	}
3494       else if (n == 0)
3495 	{
3496 	  /* Read all there was.  */
3497 	  if (buf_pos == 0)
3498 	    xfree (buf);
3499 	  else
3500 	    *buf_p = buf;
3501 	  return buf_pos;
3502 	}
3503 
3504       buf_pos += n;
3505 
3506       /* If the buffer is filling up, expand it.  */
3507       if (buf_alloc < buf_pos * 2)
3508 	{
3509 	  buf_alloc *= 2;
3510 	  buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3511 	}
3512 
3513       QUIT;
3514     }
3515 }
3516 
3517 /* See target.h.  */
3518 
3519 LONGEST
3520 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3521 			  gdb_byte **buf_p)
3522 {
3523   return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3524 }
3525 
3526 /* See target.h.  */
3527 
3528 gdb::unique_xmalloc_ptr<char>
3529 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3530 {
3531   gdb_byte *buffer;
3532   char *bufstr;
3533   LONGEST i, transferred;
3534 
3535   transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3536   bufstr = (char *) buffer;
3537 
3538   if (transferred < 0)
3539     return gdb::unique_xmalloc_ptr<char> (nullptr);
3540 
3541   if (transferred == 0)
3542     return make_unique_xstrdup ("");
3543 
3544   bufstr[transferred] = 0;
3545 
3546   /* Check for embedded NUL bytes; but allow trailing NULs.  */
3547   for (i = strlen (bufstr); i < transferred; i++)
3548     if (bufstr[i] != 0)
3549       {
3550 	warning (_("target file %s "
3551 		   "contained unexpected null characters"),
3552 		 filename);
3553 	break;
3554       }
3555 
3556   return gdb::unique_xmalloc_ptr<char> (bufstr);
3557 }
3558 
3559 
3560 static int
3561 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3562 				     CORE_ADDR addr, int len)
3563 {
3564   return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3565 }
3566 
3567 static int
3568 default_watchpoint_addr_within_range (struct target_ops *target,
3569 				      CORE_ADDR addr,
3570 				      CORE_ADDR start, int length)
3571 {
3572   return addr >= start && addr < start + length;
3573 }
3574 
3575 /* See target.h.  */
3576 
3577 target_ops *
3578 target_stack::find_beneath (const target_ops *t) const
3579 {
3580   /* Look for a non-empty slot at stratum levels beneath T's.  */
3581   for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3582     if (m_stack[stratum].get () != NULL)
3583       return m_stack[stratum].get ();
3584 
3585   return NULL;
3586 }
3587 
3588 /* See target.h.  */
3589 
3590 struct target_ops *
3591 find_target_at (enum strata stratum)
3592 {
3593   return current_inferior ()->target_at (stratum);
3594 }
3595 
3596 
3597 
3598 /* See target.h.  */
3599 
3600 void
3601 target_announce_detach (int from_tty)
3602 {
3603   pid_t pid;
3604   const char *exec_file;
3605 
3606   if (!from_tty)
3607     return;
3608 
3609   pid = inferior_ptid.pid ();
3610   exec_file = get_exec_file (0);
3611   if (exec_file == nullptr)
3612     gdb_printf ("Detaching from pid %s\n",
3613 		target_pid_to_str (ptid_t (pid)).c_str ());
3614   else
3615     gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3616 		target_pid_to_str (ptid_t (pid)).c_str ());
3617 }
3618 
3619 /* See target.h.  */
3620 
3621 void
3622 target_announce_attach (int from_tty, int pid)
3623 {
3624   if (!from_tty)
3625     return;
3626 
3627   const char *exec_file = get_exec_file (0);
3628 
3629   if (exec_file != nullptr)
3630     gdb_printf ("Attaching to program: %s, %s\n", exec_file,
3631 		target_pid_to_str (ptid_t (pid)).c_str ());
3632   else
3633     gdb_printf ("Attaching to %s\n",
3634 		target_pid_to_str (ptid_t (pid)).c_str ());
3635 }
3636 
3637 /* The inferior process has died.  Long live the inferior!  */
3638 
3639 void
3640 generic_mourn_inferior (void)
3641 {
3642   inferior *inf = current_inferior ();
3643 
3644   switch_to_no_thread ();
3645 
3646   /* Mark breakpoints uninserted in case something tries to delete a
3647      breakpoint while we delete the inferior's threads (which would
3648      fail, since the inferior is long gone).  */
3649   mark_breakpoints_out ();
3650 
3651   if (inf->pid != 0)
3652     exit_inferior (inf);
3653 
3654   /* Note this wipes step-resume breakpoints, so needs to be done
3655      after exit_inferior, which ends up referencing the step-resume
3656      breakpoints through clear_thread_inferior_resources.  */
3657   breakpoint_init_inferior (inf_exited);
3658 
3659   registers_changed ();
3660 
3661   reopen_exec_file ();
3662   reinit_frame_cache ();
3663 
3664   if (deprecated_detach_hook)
3665     deprecated_detach_hook ();
3666 }
3667 
3668 /* Convert a normal process ID to a string.  */
3670 
3671 std::string
3672 normal_pid_to_str (ptid_t ptid)
3673 {
3674   return string_printf ("process %d", ptid.pid ());
3675 }
3676 
3677 static std::string
3678 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3679 {
3680   return normal_pid_to_str (ptid);
3681 }
3682 
3683 /* Error-catcher for target_find_memory_regions.  */
3684 static int
3685 dummy_find_memory_regions (struct target_ops *self,
3686 			   find_memory_region_ftype ignore1, void *ignore2)
3687 {
3688   error (_("Command not implemented for this target."));
3689   return 0;
3690 }
3691 
3692 /* Error-catcher for target_make_corefile_notes.  */
3693 static gdb::unique_xmalloc_ptr<char>
3694 dummy_make_corefile_notes (struct target_ops *self,
3695 			   bfd *ignore1, int *ignore2)
3696 {
3697   error (_("Command not implemented for this target."));
3698   return NULL;
3699 }
3700 
3701 #include "target-delegates.c"
3702 
3703 /* The initial current target, so that there is always a semi-valid
3704    current target.  */
3705 
3706 static dummy_target the_dummy_target;
3707 
3708 /* See target.h.  */
3709 
3710 target_ops *
3711 get_dummy_target ()
3712 {
3713   return &the_dummy_target;
3714 }
3715 
3716 static const target_info dummy_target_info = {
3717   "None",
3718   N_("None"),
3719   ""
3720 };
3721 
3722 strata
3723 dummy_target::stratum () const
3724 {
3725   return dummy_stratum;
3726 }
3727 
3728 strata
3729 debug_target::stratum () const
3730 {
3731   return debug_stratum;
3732 }
3733 
3734 const target_info &
3735 dummy_target::info () const
3736 {
3737   return dummy_target_info;
3738 }
3739 
3740 const target_info &
3741 debug_target::info () const
3742 {
3743   return beneath ()->info ();
3744 }
3745 
3746 
3747 
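/* Close TARG.  TARG must already have been unpushed from every
   inferior's target stack; any file I/O handles associated with it are
   invalidated before its close method is called.  */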
3748 void
3749 target_close (struct target_ops *targ)
3750 {
3751   for (inferior *inf : all_inferiors ())
3752     gdb_assert (!inf->target_is_pushed (targ));
3753 
3754   fileio_handles_invalidate_target (targ);
3755 
3756   targ->close ();
3757 
3758   if (targetdebug)
3759     gdb_printf (gdb_stdlog, "target_close ()\n");
3760 }
3761 
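/* Return nonzero if the thread PTID is still alive, according to the
   current inferior's top target.  */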
3762 int
3763 target_thread_alive (ptid_t ptid)
3764 {
3765   return current_inferior ()->top_target ()->thread_alive (ptid);
3766 }
3767 
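/* Ask the current inferior's top target to update GDB's thread list so
   that it matches the threads that actually exist in the target.  */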
3768 void
3769 target_update_thread_list (void)
3770 {
3771   current_inferior ()->top_target ()->update_thread_list ();
3772 }
3773 
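/* Ask the target to stop the thread(s) matching PTID.  If the
   "may-interrupt" permission is off, just warn and do nothing.  */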
3774 void
3775 target_stop (ptid_t ptid)
3776 {
3777   process_stratum_target *proc_target = current_inferior ()->process_target ();
3778 
3779   gdb_assert (!proc_target->commit_resumed_state);
3780 
3781   if (!may_stop)
3782     {
3783       warning (_("May not interrupt or stop the target, ignoring attempt"));
3784       return;
3785     }
3786 
3787   current_inferior ()->top_target ()->stop (ptid);
3788 }
3789 
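/* Interrupt the target.  As with target_stop, this only warns and does
   nothing when the "may-interrupt" permission is off.  */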
3790 void
3791 target_interrupt ()
3792 {
3793   if (!may_stop)
3794     {
3795       warning (_("May not interrupt or stop the target, ignoring attempt"));
3796       return;
3797     }
3798 
3799   current_inferior ()->top_target ()->interrupt ();
3800 }
3801 
3802 /* See target.h.  */
3803 
3804 void
3805 target_pass_ctrlc (void)
3806 {
3807   /* Pass the Ctrl-C to the first target that has a thread
3808      running.  */
3809   for (inferior *inf : all_inferiors ())
3810     {
3811       target_ops *proc_target = inf->process_target ();
3812       if (proc_target == NULL)
3813 	continue;
3814 
3815       for (thread_info *thr : inf->non_exited_threads ())
3816 	{
3817 	  /* A thread can be THREAD_STOPPED and executing, while
3818 	     running an infcall.  */
3819 	  if (thr->state == THREAD_RUNNING || thr->executing ())
3820 	    {
3821 	      /* We can get here quite deep in target layers.  Avoid
3822 		 switching thread context or anything that would
3823 		 communicate with the target (e.g., to fetch
3824 		 registers), or flushing e.g., the frame cache.  We
3825 		 just switch inferior in order to be able to call
3826 		 through the target_stack.  */
3827 	      scoped_restore_current_inferior restore_inferior;
3828 	      set_current_inferior (inf);
3829 	      current_inferior ()->top_target ()->pass_ctrlc ();
3830 	      return;
3831 	    }
3832 	}
3833     }
3834 }
3835 
3836 /* See target.h.  */
3837 
3838 void
3839 default_target_pass_ctrlc (struct target_ops *ops)
3840 {
3841   target_interrupt ();
3842 }
3843 
3844 /* See target/target.h.  */
3845 
3846 void
3847 target_stop_and_wait (ptid_t ptid)
3848 {
3849   struct target_waitstatus status;
3850   bool was_non_stop = non_stop;
3851 
3852   non_stop = true;
3853   target_stop (ptid);
3854 
3855   target_wait (ptid, &status, 0);
3856 
3857   non_stop = was_non_stop;
3858 }
3859 
3860 /* See target/target.h.  */
3861 
3862 void
3863 target_continue_no_signal (ptid_t ptid)
3864 {
3865   target_resume (ptid, 0, GDB_SIGNAL_0);
3866 }
3867 
3868 /* See target/target.h.  */
3869 
3870 void
3871 target_continue (ptid_t ptid, enum gdb_signal signal)
3872 {
3873   target_resume (ptid, 0, signal);
3874 }
3875 
3876 /* Concatenate ELEM to LIST, a comma-separated list.  */
3877 
3878 static void
3879 str_comma_list_concat_elem (std::string *list, const char *elem)
3880 {
3881   if (!list->empty ())
3882     list->append (", ");
3883 
3884   list->append (elem);
3885 }
3886 
3887 /* Helper for target_options_to_string.  If OPT is present in
3888    TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET,
3889    and remove OPT from TARGET_OPTIONS.  */
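/* For example, if TARGET_OPTIONS contains TARGET_WNOHANG, then
   do_option (&target_options, &ret, TARGET_WNOHANG, "TARGET_WNOHANG")
   appends "TARGET_WNOHANG" to RET and clears that bit from
   TARGET_OPTIONS.  */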
3890 
3891 static void
3892 do_option (target_wait_flags *target_options, std::string *ret,
3893 	   target_wait_flag opt, const char *opt_str)
3894 {
3895   if ((*target_options & opt) != 0)
3896     {
3897       str_comma_list_concat_elem (ret, opt_str);
3898       *target_options &= ~opt;
3899     }
3900 }
3901 
3902 /* See target.h.  */
3903 
3904 std::string
3905 target_options_to_string (target_wait_flags target_options)
3906 {
3907   std::string ret;
3908 
3909 #define DO_TARG_OPTION(OPT) \
3910   do_option (&target_options, &ret, OPT, #OPT)
3911 
3912   DO_TARG_OPTION (TARGET_WNOHANG);
3913 
3914   if (target_options != 0)
3915     str_comma_list_concat_elem (&ret, "unknown???");
3916 
3917   return ret;
3918 }
3919 
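/* Fetch the contents of register REGNO from the target into REGCACHE,
   logging the fetched value when target debugging is enabled.  */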
3920 void
3921 target_fetch_registers (struct regcache *regcache, int regno)
3922 {
3923   current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3924   if (targetdebug)
3925     regcache->debug_print_register ("target_fetch_registers", regno);
3926 }
3927 
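/* Write register REGNO from REGCACHE back to the target.  This is an
   error when the "may-write-registers" permission is off.  */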
3928 void
3929 target_store_registers (struct regcache *regcache, int regno)
3930 {
3931   if (!may_write_registers)
3932     error (_("Writing to registers is not allowed (regno %d)"), regno);
3933 
3934   current_inferior ()->top_target ()->store_registers (regcache, regno);
3935   if (targetdebug)
3936     {
3937       regcache->debug_print_register ("target_store_registers", regno);
3938     }
3939 }
3940 
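/* Return the core that thread PTID was last seen running on, or -1 if
   it cannot be determined.  */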
3941 int
3942 target_core_of_thread (ptid_t ptid)
3943 {
3944   return current_inferior ()->top_target ()->core_of_thread (ptid);
3945 }
3946 
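/* Compare SIZE bytes of target memory starting at LMA against DATA,
   reading through OPS in chunks of at most 1024 bytes.  Return 1 if
   the contents match, 0 on any mismatch or read failure.  */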
3947 int
3948 simple_verify_memory (struct target_ops *ops,
3949 		      const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3950 {
3951   LONGEST total_xfered = 0;
3952 
3953   while (total_xfered < size)
3954     {
3955       ULONGEST xfered_len;
3956       enum target_xfer_status status;
3957       gdb_byte buf[1024];
3958       ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3959 
3960       status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3961 				    buf, NULL, lma + total_xfered, howmuch,
3962 				    &xfered_len);
3963       if (status == TARGET_XFER_OK
3964 	  && memcmp (data + total_xfered, buf, xfered_len) == 0)
3965 	{
3966 	  total_xfered += xfered_len;
3967 	  QUIT;
3968 	}
3969       else
3970 	return 0;
3971     }
3972   return 1;
3973 }
3974 
3975 /* Default implementation of memory verification.  */
3976 
3977 static int
3978 default_verify_memory (struct target_ops *self,
3979 		       const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3980 {
3981   /* Start over from the top of the target stack.  */
3982   return simple_verify_memory (current_inferior ()->top_target (),
3983 			       data, memaddr, size);
3984 }
3985 
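/* Verify that the SIZE bytes of target memory at MEMADDR match DATA,
   using the top target's verify_memory method.  */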
3986 int
3987 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3988 {
3989   target_ops *target = current_inferior ()->top_target ();
3990 
3991   return target->verify_memory (data, memaddr, size);
3992 }
3993 
3994 /* The documentation for this function is in its prototype declaration in
3995    target.h.  */
3996 
3997 int
3998 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3999 			       enum target_hw_bp_type rw)
4000 {
4001   target_ops *target = current_inferior ()->top_target ();
4002 
4003   return target->insert_mask_watchpoint (addr, mask, rw);
4004 }
4005 
4006 /* The documentation for this function is in its prototype declaration in
4007    target.h.  */
4008 
4009 int
4010 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
4011 			       enum target_hw_bp_type rw)
4012 {
4013   target_ops *target = current_inferior ()->top_target ();
4014 
4015   return target->remove_mask_watchpoint (addr, mask, rw);
4016 }
4017 
4018 /* The documentation for this function is in its prototype declaration
4019    in target.h.  */
4020 
4021 int
4022 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4023 {
4024   target_ops *target = current_inferior ()->top_target ();
4025 
4026   return target->masked_watch_num_registers (addr, mask);
4027 }
4028 
4029 /* The documentation for this function is in its prototype declaration
4030    in target.h.  */
4031 
4032 int
4033 target_ranged_break_num_registers (void)
4034 {
4035   return current_inferior ()->top_target ()->ranged_break_num_registers ();
4036 }
4037 
4038 /* See target.h.  */
4039 
4040 struct btrace_target_info *
4041 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4042 {
4043   return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4044 }
4045 
4046 /* See target.h.  */
4047 
4048 void
4049 target_disable_btrace (struct btrace_target_info *btinfo)
4050 {
4051   current_inferior ()->top_target ()->disable_btrace (btinfo);
4052 }
4053 
4054 /* See target.h.  */
4055 
4056 void
4057 target_teardown_btrace (struct btrace_target_info *btinfo)
4058 {
4059   current_inferior ()->top_target ()->teardown_btrace (btinfo);
4060 }
4061 
4062 /* See target.h.  */
4063 
4064 enum btrace_error
4065 target_read_btrace (struct btrace_data *btrace,
4066 		    struct btrace_target_info *btinfo,
4067 		    enum btrace_read_type type)
4068 {
4069   target_ops *target = current_inferior ()->top_target ();
4070 
4071   return target->read_btrace (btrace, btinfo, type);
4072 }
4073 
4074 /* See target.h.  */
4075 
4076 const struct btrace_config *
4077 target_btrace_conf (const struct btrace_target_info *btinfo)
4078 {
4079   return current_inferior ()->top_target ()->btrace_conf (btinfo);
4080 }
4081 
4082 /* See target.h.  */
4083 
4084 void
4085 target_stop_recording (void)
4086 {
4087   current_inferior ()->top_target ()->stop_recording ();
4088 }
4089 
4090 /* See target.h.  */
4091 
4092 void
4093 target_save_record (const char *filename)
4094 {
4095   current_inferior ()->top_target ()->save_record (filename);
4096 }
4097 
4098 /* See target.h.  */
4099 
4100 int
4101 target_supports_delete_record ()
4102 {
4103   return current_inferior ()->top_target ()->supports_delete_record ();
4104 }
4105 
4106 /* See target.h.  */
4107 
4108 void
4109 target_delete_record (void)
4110 {
4111   current_inferior ()->top_target ()->delete_record ();
4112 }
4113 
4114 /* See target.h.  */
4115 
4116 enum record_method
4117 target_record_method (ptid_t ptid)
4118 {
4119   return current_inferior ()->top_target ()->record_method (ptid);
4120 }
4121 
4122 /* See target.h.  */
4123 
4124 int
4125 target_record_is_replaying (ptid_t ptid)
4126 {
4127   return current_inferior ()->top_target ()->record_is_replaying (ptid);
4128 }
4129 
4130 /* See target.h.  */
4131 
4132 int
4133 target_record_will_replay (ptid_t ptid, int dir)
4134 {
4135   return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4136 }
4137 
4138 /* See target.h.  */
4139 
4140 void
4141 target_record_stop_replaying (void)
4142 {
4143   current_inferior ()->top_target ()->record_stop_replaying ();
4144 }
4145 
4146 /* See target.h.  */
4147 
4148 void
4149 target_goto_record_begin (void)
4150 {
4151   current_inferior ()->top_target ()->goto_record_begin ();
4152 }
4153 
4154 /* See target.h.  */
4155 
4156 void
4157 target_goto_record_end (void)
4158 {
4159   current_inferior ()->top_target ()->goto_record_end ();
4160 }
4161 
4162 /* See target.h.  */
4163 
4164 void
4165 target_goto_record (ULONGEST insn)
4166 {
4167   current_inferior ()->top_target ()->goto_record (insn);
4168 }
4169 
4170 /* See target.h.  */
4171 
4172 void
4173 target_insn_history (int size, gdb_disassembly_flags flags)
4174 {
4175   current_inferior ()->top_target ()->insn_history (size, flags);
4176 }
4177 
4178 /* See target.h.  */
4179 
4180 void
4181 target_insn_history_from (ULONGEST from, int size,
4182 			  gdb_disassembly_flags flags)
4183 {
4184   current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4185 }
4186 
4187 /* See target.h.  */
4188 
4189 void
4190 target_insn_history_range (ULONGEST begin, ULONGEST end,
4191 			   gdb_disassembly_flags flags)
4192 {
4193   current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4194 }
4195 
4196 /* See target.h.  */
4197 
4198 void
4199 target_call_history (int size, record_print_flags flags)
4200 {
4201   current_inferior ()->top_target ()->call_history (size, flags);
4202 }
4203 
4204 /* See target.h.  */
4205 
4206 void
4207 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4208 {
4209   current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4210 }
4211 
4212 /* See target.h.  */
4213 
4214 void
4215 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4216 {
4217   current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4218 }
4219 
4220 /* See target.h.  */
4221 
4222 const struct frame_unwind *
4223 target_get_unwinder (void)
4224 {
4225   return current_inferior ()->top_target ()->get_unwinder ();
4226 }
4227 
4228 /* See target.h.  */
4229 
4230 const struct frame_unwind *
4231 target_get_tailcall_unwinder (void)
4232 {
4233   return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4234 }
4235 
4236 /* See target.h.  */
4237 
4238 void
4239 target_prepare_to_generate_core (void)
4240 {
4241   current_inferior ()->top_target ()->prepare_to_generate_core ();
4242 }
4243 
4244 /* See target.h.  */
4245 
4246 void
4247 target_done_generating_core (void)
4248 {
4249   current_inferior ()->top_target ()->done_generating_core ();
4250 }
4251 
4252 
4253 
4254 static char targ_desc[] =
4255 "Names of targets and files being debugged.\nShows the entire \
4256 stack of targets currently in use (including the exec-file,\n\
4257 core-file, and process, if any), as well as the symbol file name.";
4258 
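/* Default implementation of the "rcmd" target method: report that the
   "monitor" command is not supported.  */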
4259 static void
4260 default_rcmd (struct target_ops *self, const char *command,
4261 	      struct ui_file *output)
4262 {
4263   error (_("\"monitor\" command not supported by this target."));
4264 }
4265 
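/* Implement the "monitor" command, forwarding CMD to the current
   target via target_rcmd.  */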
4266 static void
4267 do_monitor_command (const char *cmd, int from_tty)
4268 {
4269   target_rcmd (cmd, gdb_stdtarg);
4270 }
4271 
4272 /* Erases all the memory regions marked as flash.  CMD and FROM_TTY are
4273    ignored.  */
4274 
4275 void
4276 flash_erase_command (const char *cmd, int from_tty)
4277 {
4278   /* Set if we erase any flash region, so we know to finalize below.  */
4279   bool found_flash_region = false;
4280   struct gdbarch *gdbarch = target_gdbarch ();
4281 
4282   std::vector<mem_region> mem_regions = target_memory_map ();
4283 
4284   /* Iterate over all memory regions.  */
4285   for (const mem_region &m : mem_regions)
4286     {
4287       /* Is this a flash memory region?  */
4288       if (m.attrib.mode == MEM_FLASH)
4289 	{
4290 	  found_flash_region = true;
4291 	  target_flash_erase (m.lo, m.hi - m.lo);
4292 
4293 	  ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4294 
4295 	  current_uiout->message (_("Erasing flash memory region at address "));
4296 	  current_uiout->field_core_addr ("address", gdbarch, m.lo);
4297 	  current_uiout->message (", size = ");
4298 	  current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4299 	  current_uiout->message ("\n");
4300 	}
4301     }
4302 
4303   /* Did we do any flash operations?  If so, we need to finalize them.  */
4304   if (found_flash_region)
4305     target_flash_done ();
4306   else
4307     current_uiout->message (_("No flash memory regions found.\n"));
4308 }
4309 
4310 /* Print the name of each layer of our target stack.  */
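/* With a live native inferior, the output typically looks something
   like:

     The current target stack is:
       - native (Native process)
       - exec (Local exec file)
       - None (None)

   though the exact layers depend on which targets are pushed.  */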
4311 
4312 static void
4313 maintenance_print_target_stack (const char *cmd, int from_tty)
4314 {
4315   gdb_printf (_("The current target stack is:\n"));
4316 
4317   for (target_ops *t = current_inferior ()->top_target ();
4318        t != NULL;
4319        t = t->beneath ())
4320     {
4321       if (t->stratum () == debug_stratum)
4322 	continue;
4323       gdb_printf ("  - %s (%s)\n", t->shortname (), t->longname ());
4324     }
4325 }
4326 
4327 /* See target.h.  */
4328 
4329 void
4330 target_async (bool enable)
4331 {
4332   /* If we are trying to enable async mode then it must be the case that
4333      async mode is possible for this target.  */
4334   gdb_assert (!enable || target_can_async_p ());
4335   infrun_async (enable);
4336   current_inferior ()->top_target ()->async (enable);
4337 }
4338 
4339 /* See target.h.  */
4340 
4341 void
4342 target_thread_events (int enable)
4343 {
4344   current_inferior ()->top_target ()->thread_events (enable);
4345 }
4346 
4347 /* Controls whether targets may report that they can be, or are,
4348    async.  This is just for maintainers to use when debugging gdb.  */
4349 bool target_async_permitted = true;
4350 
4351 static void
4352 set_maint_target_async (bool permitted)
4353 {
4354   if (have_live_inferiors ())
4355     error (_("Cannot change this setting while the inferior is running."));
4356 
4357   target_async_permitted = permitted;
4358 }
4359 
4360 static bool
4361 get_maint_target_async ()
4362 {
4363   return target_async_permitted;
4364 }
4365 
4366 static void
4367 show_maint_target_async (ui_file *file, int from_tty,
4368 			 cmd_list_element *c, const char *value)
4369 {
4370   gdb_printf (file,
4371 	      _("Controlling the inferior in "
4372 		"asynchronous mode is %s.\n"), value);
4373 }
4374 
4375 /* Return true if the target operates in non-stop mode even with "set
4376    non-stop off".  */
4377 
4378 static int
4379 target_always_non_stop_p (void)
4380 {
4381   return current_inferior ()->top_target ()->always_non_stop_p ();
4382 }
4383 
4384 /* See target.h.  */
4385 
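/* Non-stop is in effect if the user enabled it with "set non-stop on",
   if "maint set target-non-stop" is "on", or if that setting is "auto"
   and the target always runs in non-stop mode; in every case the
   target must also support async execution.  */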
4386 bool
4387 target_is_non_stop_p ()
4388 {
4389   return ((non_stop
4390 	   || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4391 	   || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4392 	       && target_always_non_stop_p ()))
4393 	  && target_can_async_p ());
4394 }
4395 
4396 /* See target.h.  */
4397 
4398 bool
4399 exists_non_stop_target ()
4400 {
4401   if (target_is_non_stop_p ())
4402     return true;
4403 
4404   scoped_restore_current_thread restore_thread;
4405 
4406   for (inferior *inf : all_inferiors ())
4407     {
4408       switch_to_inferior_no_thread (inf);
4409       if (target_is_non_stop_p ())
4410 	return true;
4411     }
4412 
4413   return false;
4414 }
4415 
4416 /* Controls whether targets may report that they always run in non-stop
4417    mode.  This is just for maintainers to use when debugging gdb.  */
4418 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4419 
4420 /* Set callback for maint target-non-stop setting.  */
4421 
4422 static void
4423 set_maint_target_non_stop (auto_boolean enabled)
4424 {
4425   if (have_live_inferiors ())
4426     error (_("Cannot change this setting while the inferior is running."));
4427 
4428   target_non_stop_enabled = enabled;
4429 }
4430 
4431 /* Get callback for maint target-non-stop setting.  */
4432 
4433 static auto_boolean
4434 get_maint_target_non_stop ()
4435 {
4436   return target_non_stop_enabled;
4437 }
4438 
4439 static void
4440 show_maint_target_non_stop (ui_file *file, int from_tty,
4441 			    cmd_list_element *c, const char *value)
4442 {
4443   if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4444     gdb_printf (file,
4445 		_("Whether the target is always in non-stop mode "
4446 		  "is %s (currently %s).\n"), value,
4447 		target_always_non_stop_p () ? "on" : "off");
4448   else
4449     gdb_printf (file,
4450 		_("Whether the target is always in non-stop mode "
4451 		  "is %s.\n"), value);
4452 }
4453 
4454 /* Temporary copies of permission settings.  */
4455 
4456 static bool may_write_registers_1 = true;
4457 static bool may_write_memory_1 = true;
4458 static bool may_insert_breakpoints_1 = true;
4459 static bool may_insert_tracepoints_1 = true;
4460 static bool may_insert_fast_tracepoints_1 = true;
4461 static bool may_stop_1 = true;
4462 
4463 /* Make the user-set values match the real values again.  */
4464 
4465 void
4466 update_target_permissions (void)
4467 {
4468   may_write_registers_1 = may_write_registers;
4469   may_write_memory_1 = may_write_memory;
4470   may_insert_breakpoints_1 = may_insert_breakpoints;
4471   may_insert_tracepoints_1 = may_insert_tracepoints;
4472   may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4473   may_stop_1 = may_stop;
4474 }
4475 
4476 /* This single function handles most of the permission flags in the
4477    same way.  */
4478 
4479 static void
4480 set_target_permissions (const char *args, int from_tty,
4481 			struct cmd_list_element *c)
4482 {
4483   if (target_has_execution ())
4484     {
4485       update_target_permissions ();
4486       error (_("Cannot change this setting while the inferior is running."));
4487     }
4488 
4489   /* Make the real values match the user-changed values.  */
4490   may_write_registers = may_write_registers_1;
4491   may_insert_breakpoints = may_insert_breakpoints_1;
4492   may_insert_tracepoints = may_insert_tracepoints_1;
4493   may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4494   may_stop = may_stop_1;
4495   update_observer_mode ();
4496 }
4497 
4498 /* Set memory write permission independently of observer mode.  */
4499 
4500 static void
4501 set_write_memory_permission (const char *args, int from_tty,
4502 			     struct cmd_list_element *c)
4503 {
4504   /* Make the real values match the user-changed values.  */
4505   may_write_memory = may_write_memory_1;
4506   update_observer_mode ();
4507 }
4508 
4509 void _initialize_target ();
4510 
4511 void
4512 _initialize_target ()
4513 {
4514   the_debug_target = new debug_target ();
4515 
4516   add_info ("target", info_target_command, targ_desc);
4517   add_info ("files", info_target_command, targ_desc);
4518 
4519   add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4520 Set target debugging."), _("\
4521 Show target debugging."), _("\
4522 When non-zero, target debugging is enabled.  Higher numbers are more\n\
4523 verbose."),
4524 			     set_targetdebug,
4525 			     show_targetdebug,
4526 			     &setdebuglist, &showdebuglist);
4527 
4528   add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4529 			   &trust_readonly, _("\
4530 Set mode for reading from readonly sections."), _("\
4531 Show mode for reading from readonly sections."), _("\
4532 When this mode is on, memory reads from readonly sections (such as .text)\n\
4533 will be read from the object file instead of from the target.  This will\n\
4534 result in significant performance improvement for remote targets."),
4535 			   NULL,
4536 			   show_trust_readonly,
4537 			   &setlist, &showlist);
4538 
4539   add_com ("monitor", class_obscure, do_monitor_command,
4540 	   _("Send a command to the remote monitor (remote targets only)."));
4541 
4542   add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4543 	   _("Print the name of each layer of the internal target stack."),
4544 	   &maintenanceprintlist);
4545 
4546   add_setshow_boolean_cmd ("target-async", no_class,
4547 			   _("\
4548 Set whether gdb controls the inferior in asynchronous mode."), _("\
4549 Show whether gdb controls the inferior in asynchronous mode."), _("\
4550 Tells gdb whether to control the inferior in asynchronous mode."),
4551 			   set_maint_target_async,
4552 			   get_maint_target_async,
4553 			   show_maint_target_async,
4554 			   &maintenance_set_cmdlist,
4555 			   &maintenance_show_cmdlist);
4556 
4557   add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4558 				_("\
4559 Set whether gdb always controls the inferior in non-stop mode."), _("\
4560 Show whether gdb always controls the inferior in non-stop mode."), _("\
4561 Tells gdb whether to control the inferior in non-stop mode."),
4562 				set_maint_target_non_stop,
4563 				get_maint_target_non_stop,
4564 				show_maint_target_non_stop,
4565 				&maintenance_set_cmdlist,
4566 				&maintenance_show_cmdlist);
4567 
4568   add_setshow_boolean_cmd ("may-write-registers", class_support,
4569 			   &may_write_registers_1, _("\
4570 Set permission to write into registers."), _("\
4571 Show permission to write into registers."), _("\
4572 When this permission is on, GDB may write into the target's registers.\n\
4573 Otherwise, any sort of write attempt will result in an error."),
4574 			   set_target_permissions, NULL,
4575 			   &setlist, &showlist);
4576 
4577   add_setshow_boolean_cmd ("may-write-memory", class_support,
4578 			   &may_write_memory_1, _("\
4579 Set permission to write into target memory."), _("\
4580 Show permission to write into target memory."), _("\
4581 When this permission is on, GDB may write into the target's memory.\n\
4582 Otherwise, any sort of write attempt will result in an error."),
4583 			   set_write_memory_permission, NULL,
4584 			   &setlist, &showlist);
4585 
4586   add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4587 			   &may_insert_breakpoints_1, _("\
4588 Set permission to insert breakpoints in the target."), _("\
4589 Show permission to insert breakpoints in the target."), _("\
4590 When this permission is on, GDB may insert breakpoints in the program.\n\
4591 Otherwise, any sort of insertion attempt will result in an error."),
4592 			   set_target_permissions, NULL,
4593 			   &setlist, &showlist);
4594 
4595   add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4596 			   &may_insert_tracepoints_1, _("\
4597 Set permission to insert tracepoints in the target."), _("\
4598 Show permission to insert tracepoints in the target."), _("\
4599 When this permission is on, GDB may insert tracepoints in the program.\n\
4600 Otherwise, any sort of insertion attempt will result in an error."),
4601 			   set_target_permissions, NULL,
4602 			   &setlist, &showlist);
4603 
4604   add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4605 			   &may_insert_fast_tracepoints_1, _("\
4606 Set permission to insert fast tracepoints in the target."), _("\
4607 Show permission to insert fast tracepoints in the target."), _("\
4608 When this permission is on, GDB may insert fast tracepoints.\n\
4609 Otherwise, any sort of insertion attempt will result in an error."),
4610 			   set_target_permissions, NULL,
4611 			   &setlist, &showlist);
4612 
4613   add_setshow_boolean_cmd ("may-interrupt", class_support,
4614 			   &may_stop_1, _("\
4615 Set permission to interrupt or signal the target."), _("\
4616 Show permission to interrupt or signal the target."), _("\
4617 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4618 Otherwise, any attempt to interrupt or stop will be ignored."),
4619 			   set_target_permissions, NULL,
4620 			   &setlist, &showlist);
4621 
4622   add_com ("flash-erase", no_class, flash_erase_command,
4623 	   _("Erase all flash memory regions."));
4624 
4625   add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4626 			   &auto_connect_native_target, _("\
4627 Set whether GDB may automatically connect to the native target."), _("\
4628 Show whether GDB may automatically connect to the native target."), _("\
4629 When on, and GDB is not connected to a target yet, GDB\n\
4630 attempts \"run\" and other commands with the native target."),
4631 			   NULL, show_auto_connect_native_target,
4632 			   &setlist, &showlist);
4633 }
4634