xref: /netbsd-src/external/gpl3/gdb.old/dist/gdb/infrun.h (revision 8b657b0747480f8989760d71343d6dd33f8d4cf9)
1 /* Copyright (C) 1986-2023 Free Software Foundation, Inc.
2 
3    This file is part of GDB.
4 
5    This program is free software; you can redistribute it and/or modify
6    it under the terms of the GNU General Public License as published by
7    the Free Software Foundation; either version 3 of the License, or
8    (at your option) any later version.
9 
10    This program is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13    GNU General Public License for more details.
14 
15    You should have received a copy of the GNU General Public License
16    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
17 
18 #ifndef INFRUN_H
19 #define INFRUN_H 1
20 
21 #include "gdbthread.h"
22 #include "symtab.h"
23 #include "gdbsupport/byte-vector.h"
24 #include "gdbsupport/intrusive_list.h"
25 
26 struct target_waitstatus;
27 class frame_info_ptr;
28 struct address_space;
29 struct return_value_info;
30 struct process_stratum_target;
31 struct thread_info;
32 
33 /* True if we are debugging run control.  */
34 extern bool debug_infrun;
35 
36 /* Print an "infrun" debug statement.  */
37 
38 #define infrun_debug_printf(fmt, ...) \
39   debug_prefixed_printf_cond (debug_infrun, "infrun", fmt, ##__VA_ARGS__)
40 
41 /* Print "infrun" start/end debug statements.  */
42 
43 #define INFRUN_SCOPED_DEBUG_START_END(fmt, ...) \
44   scoped_debug_start_end (debug_infrun, "infrun", fmt, ##__VA_ARGS__)
45 
46 /* Print "infrun" enter/exit debug statements.  */
47 
48 #define INFRUN_SCOPED_DEBUG_ENTER_EXIT \
49   scoped_debug_enter_exit (debug_infrun, "infrun")
50 
/* An infrun debug helper routine to print out all the threads in the set
52    THREADS (which should be a range type that returns thread_info*
53    objects).
54 
55    The TITLE is a string that is printed before the list of threads.
56 
57    Output is only produced when 'set debug infrun on'.  */
58 
59 template<typename ThreadRange>
60 static inline void
61 infrun_debug_show_threads (const char *title, ThreadRange threads)
62 {
63   if (debug_infrun)
64     {
65       INFRUN_SCOPED_DEBUG_ENTER_EXIT;
66 
67       infrun_debug_printf ("%s:", title);
68       for (thread_info *thread : threads)
69 	infrun_debug_printf ("  thread %s, executing = %d, resumed = %d, "
70 			     "state = %s",
71 			     thread->ptid.to_string ().c_str (),
72 			     thread->executing (),
73 			     thread->resumed (),
74 			     thread_state_string (thread->state));
75     }
76 }
77 
78 
79 /* Nonzero if we want to give control to the user when we're notified
80    of shared library events by the dynamic linker.  */
81 extern int stop_on_solib_events;
82 
83 /* True if execution commands resume all threads of all processes by
84    default; otherwise, resume only threads of the current inferior
85    process.  */
86 extern bool sched_multi;
87 
88 /* When set, stop the 'step' command if we enter a function which has
89    no line number information.  The normal behavior is that we step
90    over such function.  */
91 extern bool step_stop_if_no_debug;
92 
93 /* If set, the inferior should be controlled in non-stop mode.  In
94    this mode, each thread is controlled independently.  Execution
95    commands apply only to the selected thread by default, and stop
96    events stop only the thread that had the event -- the other threads
97    are kept running freely.  */
98 extern bool non_stop;
99 
100 /* When set (default), the target should attempt to disable the
101    operating system's address space randomization feature when
102    starting an inferior.  */
103 extern bool disable_randomization;
104 
105 /* Returns a unique identifier for the current stop.  This can be used
106    to tell whether a command has proceeded the inferior past the
107    current location.  */
108 extern ULONGEST get_stop_id (void);
109 
110 /* Reverse execution.  */
enum exec_direction_kind
  {
    EXEC_FORWARD,	/* Run the program in the usual, forward direction.  */
    EXEC_REVERSE	/* Run the program backwards ("reverse execution").  */
  };
116 
117 /* The current execution direction.  */
118 extern enum exec_direction_kind execution_direction;
119 
120 extern void start_remote (int from_tty);
121 
122 /* Clear out all variables saying what to do when inferior is
123    continued or stepped.  First do this, then set the ones you want,
124    then call `proceed'.  STEP indicates whether we're preparing for a
125    step/stepi command.  */
126 extern void clear_proceed_status (int step);
127 
/* Resume execution of the inferior; see clear_proceed_status above for
   the usual call sequence.  NOTE(review): the CORE_ADDR / gdb_signal
   argument semantics are per the definition in infrun.c — confirm.  */
extern void proceed (CORE_ADDR, enum gdb_signal);
129 
130 /* Return a ptid representing the set of threads that we will proceed,
131    in the perspective of the user/frontend.  We may actually resume
132    fewer threads at first, e.g., if a thread is stopped at a
133    breakpoint that needs stepping-off, but that should not be visible
134    to the user/frontend, and neither should the frontend/user be
135    allowed to proceed any of the threads that happen to be stopped for
136    internal run control handling, if a previous command wanted them
137    resumed.  */
138 extern ptid_t user_visible_resume_ptid (int step);
139 
140 /* Return the process_stratum target that we will proceed, in the
141    perspective of the user/frontend.  If RESUME_PTID is
142    MINUS_ONE_PTID, then we'll resume all threads of all targets, so
143    the function returns NULL.  Otherwise, we'll be resuming a process
144    or thread of the current process, so we return the current
145    inferior's process stratum target.  */
146 extern process_stratum_target *user_visible_resume_target (ptid_t resume_ptid);
147 
148 /* Return control to GDB when the inferior stops for real.  Print
149    appropriate messages, remove breakpoints, give terminal our modes,
150    and run the stop hook.  Returns true if the stop hook proceeded the
151    target, false otherwise.  */
152 extern int normal_stop (void);
153 
154 /* Return the cached copy of the last target/ptid/waitstatus returned
155    by target_wait().  The data is actually cached by handle_inferior_event(),
156    which gets called immediately after target_wait().  */
157 extern void get_last_target_status (process_stratum_target **target,
158 				    ptid_t *ptid,
159 				    struct target_waitstatus *status);
160 
161 /* Set the cached copy of the last target/ptid/waitstatus.  */
162 extern void set_last_target_status (process_stratum_target *target, ptid_t ptid,
163 				    const target_waitstatus &status);
164 
165 /* Clear the cached copy of the last ptid/waitstatus returned by
166    target_wait().  */
167 extern void nullify_last_target_wait_ptid ();
168 
169 /* Stop all threads.  Only returns after everything is halted.
170 
171    REASON is a string indicating the reason why we stop all threads, used in
172    debug messages.
173 
174    If INF is non-nullptr, stop all threads of that inferior.  Otherwise, stop
175    all threads of all inferiors.  */
176 extern void stop_all_threads (const char *reason, inferior *inf = nullptr);
177 
/* Get the target into a state where it is safe to detach from the
   inferior.  NOTE(review): inferred from the name — confirm against
   the definition in infrun.c.  */
extern void prepare_for_detach (void);

/* Wait for and handle the next event from the inferior.
   NOTE(review): inferred from the name — confirm against infrun.c.  */
extern void fetch_inferior_event ();

/* Reset run-control state in preparation for waiting on a (new)
   inferior.  NOTE(review): inferred from the name — confirm.  */
extern void init_wait_for_inferior (void);
183 
184 extern void insert_step_resume_breakpoint_at_sal (struct gdbarch *,
185 						  struct symtab_and_line ,
186 						  struct frame_id);
187 
188 /* Returns true if we're trying to step past the instruction at
189    ADDRESS in ASPACE.  */
190 extern int stepping_past_instruction_at (struct address_space *aspace,
191 					 CORE_ADDR address);
192 
193 /* Returns true if thread whose thread number is THREAD is stepping
194    over a breakpoint.  */
195 extern int thread_is_stepping_over_breakpoint (int thread);
196 
197 /* Returns true if we're trying to step past an instruction that
198    triggers a non-steppable watchpoint.  */
199 extern int stepping_past_nonsteppable_watchpoint (void);
200 
201 /* Record in TP the frame and location we're currently stepping through.  */
202 extern void set_step_info (thread_info *tp,
203 			   frame_info_ptr frame,
204 			   struct symtab_and_line sal);
205 
206 /* Several print_*_reason helper functions to print why the inferior
207    has stopped to the passed in UIOUT.  */
208 
209 /* Signal received, print why the inferior has stopped.  */
210 extern void print_signal_received_reason (struct ui_out *uiout,
211 					  enum gdb_signal siggnal);
212 
213 /* Print why the inferior has stopped.  We are done with a
214    step/next/si/ni command, print why the inferior has stopped.  */
215 extern void print_end_stepping_range_reason (struct ui_out *uiout);
216 
217 /* The inferior was terminated by a signal, print why it stopped.  */
218 extern void print_signal_exited_reason (struct ui_out *uiout,
219 					enum gdb_signal siggnal);
220 
221 /* The inferior program is finished, print why it stopped.  */
222 extern void print_exited_reason (struct ui_out *uiout, int exitstatus);
223 
224 /* Reverse execution: target ran out of history info, print why the
225    inferior has stopped.  */
226 extern void print_no_history_reason (struct ui_out *uiout);
227 
228 /* Print the result of a function at the end of a 'finish' command.
229    RV points at an object representing the captured return value/type
230    and its position in the value history.  */
231 
232 extern void print_return_value (struct ui_out *uiout,
233 				struct return_value_info *rv);
234 
235 /* Print current location without a level number, if we have changed
236    functions or hit a breakpoint.  Print source line if we have one.
237    If the execution command captured a return value, print it.  If
238    DISPLAYS is false, do not call 'do_displays'.  */
239 
240 extern void print_stop_event (struct ui_out *uiout, bool displays = true);
241 
242 /* Pretty print the results of target_wait, for debugging purposes.  */
243 
244 extern void print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
245 				       const struct target_waitstatus &ws);
246 
/* Queries: whether GDB is set to stop / print / resend ("pass") when
   the inferior receives the given signal.  NOTE(review): the int
   argument is presumably a gdb_signal number; confirm the exact
   semantics against the definitions in infrun.c.  */

extern int signal_stop_state (int);

extern int signal_print_state (int);

extern int signal_pass_state (int);

/* Setters for the corresponding stop / print / pass settings; the
   second argument is the new state.  NOTE(review): return-value
   semantics per infrun.c — confirm.  */

extern int signal_stop_update (int, int);

extern int signal_print_update (int, int);

extern int signal_pass_update (int, int);

/* Push the current signal-handling settings down to the target.
   NOTE(review): inferred from the name — confirm against infrun.c.  */
extern void update_signals_program_target (void);
260 
261 /* Clear the convenience variables associated with the exit of the
262    inferior.  Currently, those variables are $_exitcode and
263    $_exitsignal.  */
264 extern void clear_exit_convenience_vars (void);
265 
266 /* Dump LEN bytes at BUF in hex to a string and return it.  */
267 extern std::string displaced_step_dump_bytes (const gdb_byte *buf, size_t len);
268 
269 extern void update_observer_mode (void);
270 
271 extern void signal_catch_update (const unsigned int *);
272 
273 /* In some circumstances we allow a command to specify a numeric
274    signal.  The idea is to keep these circumstances limited so that
275    users (and scripts) develop portable habits.  For comparison,
276    POSIX.2 `kill' requires that 1,2,3,6,9,14, and 15 work (and using a
   numeric signal at all is obsolescent).  We are slightly more lenient
278    and allow 1-15 which should match host signal numbers on most
279    systems.  Use of symbolic signal names is strongly encouraged.  */
280 enum gdb_signal gdb_signal_from_command (int num);
281 
282 /* Enables/disables infrun's async event source in the event loop.  */
283 extern void infrun_async (int enable);
284 
285 /* Call infrun's event handler the next time through the event
286    loop.  */
287 extern void mark_infrun_async_event_handler (void);
288 
289 /* The global chain of threads that need to do a step-over operation
290    to get past e.g., a breakpoint.  */
291 extern thread_step_over_list global_thread_step_over_list;
292 
293 /* Remove breakpoints if possible (usually that means, if everything
294    is stopped).  On failure, print a message.  */
295 extern void maybe_remove_breakpoints (void);
296 
297 /* If a UI was in sync execution mode, and now isn't, restore its
298    prompt (a synchronous execution command has finished, and we're
299    ready for input).  */
300 extern void all_uis_check_sync_execution_done (void);
301 
302 /* If a UI was in sync execution mode, and hasn't displayed the prompt
303    yet, re-disable its prompt (a synchronous execution command was
304    started or re-started).  */
305 extern void all_uis_on_sync_execution_starting (void);
306 
307 /* In all-stop, restart the target if it had to be stopped to
308    detach.  */
309 extern void restart_after_all_stop_detach (process_stratum_target *proc_target);
310 
311 /* RAII object to temporarily disable the requirement for target
312    stacks to commit their resumed threads.
313 
314    On construction, set process_stratum_target::commit_resumed_state
315    to false for all process_stratum targets in all target
316    stacks.
317 
318    On destruction (or if reset_and_commit() is called), set
319    process_stratum_target::commit_resumed_state to true for all
320    process_stratum targets in all target stacks, except those that:
321 
322      - have no resumed threads
323      - have a resumed thread with a pending status
324 
325    target_commit_resumed is not called in the destructor, because its
   implementations could throw, and we don't want to swallow that error in
327    a destructor.  Instead, the caller should call the
328    reset_and_commit_resumed() method so that an eventual exception can
329    propagate.  "reset" in the method name refers to the fact that this
330    method has the same effect as the destructor, in addition to
331    committing resumes.
332 
333    The creation of nested scoped_disable_commit_resumed objects is
334    tracked, such that only the outermost instance actually does
335    something, for cases like this:
336 
337      void
338      inner_func ()
339      {
340        scoped_disable_commit_resumed disable;
341 
342        // do stuff
343 
344        disable.reset_and_commit ();
345      }
346 
347      void
348      outer_func ()
349      {
350        scoped_disable_commit_resumed disable;
351 
352        for (... each thread ...)
353 	 inner_func ();
354 
355        disable.reset_and_commit ();
356      }
357 
358    In this case, we don't want the `disable` destructor in
359    `inner_func` to require targets to commit resumed threads, so that
360    the `reset_and_commit()` call in `inner_func` doesn't actually
361    resume threads.  */
362 
struct scoped_disable_commit_resumed
{
  /* REASON is a description of why the requirement is being disabled,
     presumably used in debug messages — confirm against infrun.c.  */
  explicit scoped_disable_commit_resumed (const char *reason);
  ~scoped_disable_commit_resumed ();

  DISABLE_COPY_AND_ASSIGN (scoped_disable_commit_resumed);

  /* Undoes the disabling done by the ctor, and calls
     maybe_call_commit_resumed_all_targets().  */
  void reset_and_commit ();

private:
  /* Undoes the disabling done by the ctor.  */
  void reset ();

  /* Whether this object has been reset.  */
  bool m_reset = false;

  /* The REASON string passed to the ctor.  */
  const char *m_reason;

  /* The commit-resumed enablement state as it was before this object
     disabled it, so it can be restored on reset.  NOTE(review):
     inferred from the name — confirm against infrun.c.  */
  bool m_prev_enable_commit_resumed;
};
384 
385 /* Call target_commit_resumed method on all target stacks whose
386    process_stratum target layer has COMMIT_RESUME_STATE set.  */
387 
388 extern void maybe_call_commit_resumed_all_targets ();
389 
390 /* RAII object to temporarily enable the requirement for target stacks
391    to commit their resumed threads.  This is the inverse of
392    scoped_disable_commit_resumed.  The constructor calls the
393    maybe_call_commit_resumed_all_targets function itself, since it's
394    OK to throw from a constructor.  */
395 
struct scoped_enable_commit_resumed
{
  /* REASON is a description of why the requirement is being enabled,
     presumably used in debug messages — confirm against infrun.c.  */
  explicit scoped_enable_commit_resumed (const char *reason);
  ~scoped_enable_commit_resumed ();

  DISABLE_COPY_AND_ASSIGN (scoped_enable_commit_resumed);

private:
  /* The REASON string passed to the ctor.  */
  const char *m_reason;

  /* The commit-resumed enablement state as it was before this object
     enabled it, so it can be restored by the dtor.  NOTE(review):
     inferred from the name — confirm against infrun.c.  */
  bool m_prev_enable_commit_resumed;
};
407 
408 
409 #endif /* INFRUN_H */
410