/* Copyright (C) 1986-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef INFRUN_H
#define INFRUN_H 1

#include "gdbthread.h"
#include "symtab.h"
#include "gdbsupport/byte-vector.h"
#include "gdbsupport/intrusive_list.h"

struct target_waitstatus;
class frame_info_ptr;
struct address_space;
struct return_value_info;
struct process_stratum_target;
struct thread_info;

/* True if we are debugging run control.  */
extern bool debug_infrun;

/* Print an "infrun" debug statement.  */

#define infrun_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_infrun, "infrun", fmt, ##__VA_ARGS__)

/* Print "infrun" start/end debug statements.  */

#define INFRUN_SCOPED_DEBUG_START_END(fmt, ...) \
  scoped_debug_start_end (debug_infrun, "infrun", fmt, ##__VA_ARGS__)

/* Print "infrun" enter/exit debug statements.  */

#define INFRUN_SCOPED_DEBUG_ENTER_EXIT \
  scoped_debug_enter_exit (debug_infrun, "infrun")
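
/* For illustration, a caller would typically combine these macros
   along the following lines (the function below is hypothetical):

     static void
     example_resume_one_thread (thread_info *tp)
     {
       INFRUN_SCOPED_DEBUG_ENTER_EXIT;

       infrun_debug_printf ("resuming thread %s",
			    tp->ptid.to_string ().c_str ());
     }

   Neither macro produces any output unless "set debug infrun" is
   on.  */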

/* An infrun debug helper routine to print out all the threads in the
   set THREADS (which should be a range type that returns thread_info*
   objects).

   The TITLE is a string that is printed before the list of threads.

   Output is only produced when 'set debug infrun' is on.  */

template<typename ThreadRange>
static inline void
infrun_debug_show_threads (const char *title, ThreadRange threads)
{
  if (debug_infrun)
    {
      INFRUN_SCOPED_DEBUG_ENTER_EXIT;

      infrun_debug_printf ("%s:", title);
      for (thread_info *thread : threads)
	infrun_debug_printf ("  thread %s, executing = %d, resumed = %d, "
			     "state = %s",
			     thread->ptid.to_string ().c_str (),
			     thread->executing (),
			     thread->resumed (),
			     thread_state_string (thread->state));
    }
}
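
/* For example, assuming the all_non_exited_threads range from
   gdbthread.h (included above), the current thread set could be
   dumped with:

     infrun_debug_show_threads ("non-exited threads",
				all_non_exited_threads ());
*/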


/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
extern int stop_on_solib_events;

/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  */
extern bool sched_multi;

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
extern bool step_stop_if_no_debug;

/* If set, the inferior should be controlled in non-stop mode.  In
   this mode, each thread is controlled independently.  Execution
   commands apply only to the selected thread by default, and stop
   events stop only the thread that had the event -- the other threads
   are kept running freely.  */
extern bool non_stop;

/* When set (default), the target should attempt to disable the
   operating system's address space randomization feature when
   starting an inferior.  */
extern bool disable_randomization;

/* Returns a unique identifier for the current stop.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  */
extern ULONGEST get_stop_id (void);
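
/* As a sketch, a caller that wants to detect whether some command
   proceeded the inferior can compare stop ids before and after (the
   surrounding code here is hypothetical):

     ULONGEST saved_stop_id = get_stop_id ();
     ... run a command that may proceed the inferior ...
     if (get_stop_id () != saved_stop_id)
       ... a new stop has happened in the meantime ...
*/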

/* Reverse execution.  */
enum exec_direction_kind
  {
    EXEC_FORWARD,
    EXEC_REVERSE
  };

/* The current execution direction.  */
extern enum exec_direction_kind execution_direction;

/* Call this to point 'previous_thread' at the thread returned by
   inferior_thread, or at nullptr, if there's no selected thread.  */
extern void update_previous_thread ();

/* Get a weak reference to 'previous_thread'.  */
extern thread_info *get_previous_thread ();

extern void start_remote (int from_tty);

/* Clear out all variables saying what to do when the inferior is
   continued or stepped.  First do this, then set the ones you want,
   then call `proceed'.  STEP indicates whether we're preparing for a
   step/stepi command.  */
extern void clear_proceed_status (int step);

extern void proceed (CORE_ADDR, enum gdb_signal);
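
/* As a rough sketch of the sequence described above (not a
   prescription), a resume path typically looks like:

     clear_proceed_status (0);
     ... set up stepping state on the relevant threads ...
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   where (CORE_ADDR) -1 means "resume at the current PC" and
   GDB_SIGNAL_DEFAULT lets the thread keep whatever signal it stopped
   with.  */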

/* Return a ptid representing the set of threads that we will proceed,
   from the perspective of the user/frontend.  We may actually resume
   fewer threads at first, e.g., if a thread is stopped at a
   breakpoint that needs stepping-off, but that should not be visible
   to the user/frontend.  Nor should the frontend/user be allowed to
   proceed any of the threads that happen to be stopped for internal
   run control handling, if a previous command wanted them resumed.  */
extern ptid_t user_visible_resume_ptid (int step);

/* Return the process_stratum target that we will proceed, from the
   perspective of the user/frontend.  If RESUME_PTID is
   MINUS_ONE_PTID, then we'll resume all threads of all targets, so
   the function returns NULL.  Otherwise, we'll be resuming a process
   or thread of the current process, so we return the current
   inferior's process stratum target.  */
extern process_stratum_target *user_visible_resume_target (ptid_t resume_ptid);
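
/* These two are normally used together; a resume path might look
   roughly like this sketch:

     ptid_t resume_ptid = user_visible_resume_ptid (step);
     process_stratum_target *resume_target
       = user_visible_resume_target (resume_ptid);
     ... walk the threads matching resume_target/resume_ptid ...
*/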

/* Return control to GDB when the inferior stops for real.  Print
   appropriate messages, remove breakpoints, give terminal our modes,
   and run the stop hook.  Returns true if the stop hook proceeded the
   target, false otherwise.  */
extern bool normal_stop ();

/* Return the cached copy of the last target/ptid/waitstatus returned
   by target_wait().  The data is actually cached by handle_inferior_event(),
   which gets called immediately after target_wait().  */
extern void get_last_target_status (process_stratum_target **target,
				    ptid_t *ptid,
				    struct target_waitstatus *status);
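
/* For example, a caller interested in the most recent stop could
   retrieve it along these lines:

     process_stratum_target *last_target;
     ptid_t last_ptid;
     target_waitstatus last_status;

     get_last_target_status (&last_target, &last_ptid, &last_status);
*/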

/* Set the cached copy of the last target/ptid/waitstatus.  */
extern void set_last_target_status (process_stratum_target *target, ptid_t ptid,
				    const target_waitstatus &status);

/* Clear the cached copy of the last ptid/waitstatus returned by
   target_wait().  */
extern void nullify_last_target_wait_ptid ();

/* Stop all threads.  Only returns after everything is halted.

   REASON is a string indicating the reason why we stop all threads, used in
   debug messages.

   If INF is non-nullptr, stop all threads of that inferior.  Otherwise, stop
   all threads of all inferiors.  */
extern void stop_all_threads (const char *reason, inferior *inf = nullptr);
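
/* For instance (the reason strings, and TP below, are only
   illustrative):

     stop_all_threads ("presenting stop to user in all-stop");
     stop_all_threads ("restarting threads", tp->inf);
*/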

extern void prepare_for_detach (void);

extern void fetch_inferior_event ();

extern void init_wait_for_inferior (void);

extern void insert_step_resume_breakpoint_at_sal (struct gdbarch *,
						  struct symtab_and_line,
						  struct frame_id);

/* Returns true if we're trying to step past the instruction at
   ADDRESS in ASPACE.  */
extern int stepping_past_instruction_at (struct address_space *aspace,
					 CORE_ADDR address);

/* Returns true if the thread whose thread number is THREAD is
   stepping over a breakpoint.  */
extern int thread_is_stepping_over_breakpoint (int thread);

/* Returns true if we're trying to step past an instruction that
   triggers a non-steppable watchpoint.  */
extern int stepping_past_nonsteppable_watchpoint (void);

/* Record in TP the frame and location we're currently stepping through.  */
extern void set_step_info (thread_info *tp,
			   const frame_info_ptr &frame,
			   struct symtab_and_line sal);

/* Notify interpreters and observers that the current inferior has stopped with
   signal SIG.  */
extern void notify_signal_received (gdb_signal sig);

/* Notify interpreters and observers that the current inferior has stopped
   normally.  */
extern void notify_normal_stop (bpstat *bs, int print_frame);

/* Notify interpreters and observers that the user focus has changed.  */
extern void notify_user_selected_context_changed (user_selected_what selection);

/* Several print_*_reason helper functions to print why the inferior
   has stopped to the passed-in UIOUT.  */

/* Signal received, print why the inferior has stopped.  */
extern void print_signal_received_reason (struct ui_out *uiout,
					  enum gdb_signal siggnal);

/* The inferior was terminated by a signal, print why it stopped.  */
extern void print_signal_exited_reason (struct ui_out *uiout,
					enum gdb_signal siggnal);

/* The inferior program is finished, print why it stopped.  */
extern void print_exited_reason (struct ui_out *uiout, int exitstatus);

/* Reverse execution: target ran out of history info, print why the
   inferior has stopped.  */
extern void print_no_history_reason (struct ui_out *uiout);

/* Print the result of a function at the end of a 'finish' command.
   RV points at an object representing the captured return value/type
   and its position in the value history.  */

extern void print_return_value (struct ui_out *uiout,
				struct return_value_info *rv);

/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   If the execution command captured a return value, print it.  If
   DISPLAYS is false, do not call 'do_displays'.  */

extern void print_stop_event (struct ui_out *uiout, bool displays = true);

/* Pretty print the results of target_wait, for debugging purposes.  */

extern void print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
				       const struct target_waitstatus &ws);

/* Query or update GDB's per-signal handling settings -- whether to
   stop for, print, or pass a given signal -- as controlled by the
   "handle" command.  The *_update routines install a new setting and
   return the previous one.  */

extern int signal_stop_state (int);

extern int signal_print_state (int);

extern int signal_pass_state (int);

extern int signal_stop_update (int, int);

extern int signal_print_update (int, int);

extern int signal_pass_update (int, int);

extern void update_signals_program_target (void);

/* Clear the convenience variables associated with the exit of the
   inferior.  Currently, those variables are $_exitcode and
   $_exitsignal.  */
extern void clear_exit_convenience_vars (void);

extern void update_observer_mode (void);

extern void signal_catch_update (const unsigned int *);

/* In some circumstances we allow a command to specify a numeric
   signal.  The idea is to keep these circumstances limited so that
   users (and scripts) develop portable habits.  For comparison,
   POSIX.2 `kill' requires that 1,2,3,6,9,14, and 15 work (and using a
   numeric signal at all is obsolescent).  We are slightly more
   lenient and allow 1-15, which should match host signal numbers on
   most systems.  Use of symbolic signal names is strongly
   encouraged.  */
enum gdb_signal gdb_signal_from_command (int num);
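
/* Illustrating the mapping described above, the low gdb_signal values
   mirror the traditional Unix numbering:

     gdb_signal_from_command (2);   // GDB_SIGNAL_INT
     gdb_signal_from_command (9);   // GDB_SIGNAL_KILL

   Out-of-range numbers are not accepted.  */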

/* Enables/disables infrun's async event source in the event loop.  */
extern void infrun_async (int enable);

/* Call infrun's event handler the next time through the event
   loop.  */
extern void mark_infrun_async_event_handler (void);

/* The global chain of threads that need to do a step-over operation
   to get past, e.g., a breakpoint.  */
extern thread_step_over_list global_thread_step_over_list;

/* Remove breakpoints if possible (usually that means if everything
   is stopped).  On failure, print a message.  */
extern void maybe_remove_breakpoints (void);

/* If a UI was in sync execution mode, and now isn't, restore its
   prompt (a synchronous execution command has finished, and we're
   ready for input).  */
extern void all_uis_check_sync_execution_done (void);

/* If a UI was in sync execution mode, and hasn't displayed the prompt
   yet, re-disable its prompt (a synchronous execution command was
   started or re-started).  */
extern void all_uis_on_sync_execution_starting (void);

/* In all-stop, restart the target if it had to be stopped to
   detach.  */
extern void restart_after_all_stop_detach (process_stratum_target *proc_target);

/* RAII object to temporarily disable the requirement for target
   stacks to commit their resumed threads.

   On construction, set process_stratum_target::commit_resumed_state
   to false for all process_stratum targets in all target
   stacks.

   On destruction (or if reset_and_commit() is called), set
   process_stratum_target::commit_resumed_state to true for all
   process_stratum targets in all target stacks, except those that:

     - have no resumed threads
     - have a resumed thread with a pending status

   target_commit_resumed is not called in the destructor, because its
   implementations could throw, and we don't want to swallow that
   error in a destructor.  Instead, the caller should call the
   reset_and_commit() method so that an eventual exception can
   propagate.  "reset" in the method name refers to the fact that this
   method has the same effect as the destructor, in addition to
   committing resumes.

   The creation of nested scoped_disable_commit_resumed objects is
   tracked, such that only the outermost instance actually does
   something, for cases like this:

     void
     inner_func ()
     {
       scoped_disable_commit_resumed disable ("inner_func");

       // do stuff

       disable.reset_and_commit ();
     }

     void
     outer_func ()
     {
       scoped_disable_commit_resumed disable ("outer_func");

       for (... each thread ...)
	 inner_func ();

       disable.reset_and_commit ();
     }

   In this case, we don't want the `disable` destructor in
   `inner_func` to require targets to commit resumed threads, so that
   the `reset_and_commit()` call in `inner_func` doesn't actually
   resume threads.  */

struct scoped_disable_commit_resumed
{
  explicit scoped_disable_commit_resumed (const char *reason);
  ~scoped_disable_commit_resumed ();

  DISABLE_COPY_AND_ASSIGN (scoped_disable_commit_resumed);

  /* Undoes the disabling done by the ctor, and calls
     maybe_call_commit_resumed_all_targets().  */
  void reset_and_commit ();

private:
  /* Undoes the disabling done by the ctor.  */
  void reset ();

  /* Whether this object has been reset.  */
  bool m_reset = false;

  const char *m_reason;
  bool m_prev_enable_commit_resumed;
};

/* Call the target_commit_resumed method on all target stacks whose
   process_stratum target layer has COMMIT_RESUMED_STATE set.  */

extern void maybe_call_commit_resumed_all_targets ();

/* RAII object to temporarily enable the requirement for target stacks
   to commit their resumed threads.  This is the inverse of
   scoped_disable_commit_resumed.  The constructor calls the
   maybe_call_commit_resumed_all_targets function itself, since it's
   OK to throw from a constructor.  */

struct scoped_enable_commit_resumed
{
  explicit scoped_enable_commit_resumed (const char *reason,
					 bool force_p = false);
  ~scoped_enable_commit_resumed ();

  DISABLE_COPY_AND_ASSIGN (scoped_enable_commit_resumed);

private:
  const char *m_reason;
  bool m_prev_enable_commit_resumed;
};
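
/* A brief usage sketch (the reason string is only illustrative):

     {
       scoped_enable_commit_resumed enable ("attaching");
       ... work during which targets are asked to commit their
	   resumed threads ...
     }
*/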


#endif /* INFRUN_H */