/* Ada Ravenscar thread support.

   Copyright (C) 2004-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbthread.h"
#include "ada-lang.h"
#include "target.h"
#include "inferior.h"
#include "command.h"
#include "ravenscar-thread.h"
#include "observable.h"
#include "gdbcmd.h"
#include "top.h"
#include "regcache.h"
#include "objfiles.h"
#include <unordered_map>

/* This module provides support for "Ravenscar" tasks (Ada) when
   debugging on bare-metal targets.

   The typical situation is when debugging a bare-metal target over
   the remote protocol.  In that situation, the system does not know
   about high-level concepts such as threads, only about some code
   running on one or more CPUs.  And since the remote protocol does not
   provide any handling for CPUs, the de facto standard for handling
   them is to have one thread per CPU, where the thread's ptid has
   its lwp field set to the CPU number (e.g.: 1 for the first CPU,
   2 for the second one, etc).  This module will make that assumption.

   This module then creates and maintains the list of threads based
   on the list of Ada tasks, with one thread per Ada task.  The convention
   is that threads corresponding to the CPUs (see assumption above)
   have a ptid_t of the form (PID, LWP, 0), while threads corresponding
   to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
   is the Ada task's ID as extracted from Ada runtime information.

   Switching to a given Ada task (or its underlying thread) is performed
   by fetching the registers of that task from the memory area where
   the registers were saved.  For any of the other operations, the
   operation is performed by first finding the CPU on which the task
   is running, switching to its corresponding ptid, and then performing
   the operation on that ptid using the target beneath us.  */
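
/* For example (illustrative values only, assuming a two-CPU target
   and an inferior whose PID is 42): the CPU threads would have the
   ptids (42, 1, 0) and (42, 2, 0), while an Ada task whose task ID
   is 0x2ff001a8 would have the ptid (42, 0, 0x2ff001a8).  */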

/* If true, ravenscar task support is enabled.  */
static bool ravenscar_task_support = true;

static const char running_thread_name[] = "__gnat_running_thread_table";

static const char known_tasks_name[] = "system__tasking__debug__known_tasks";
static const char first_task_name[] = "system__tasking__debug__first_task";

static const char ravenscar_runtime_initializer[]
  = "system__bb__threads__initialize";

static const target_info ravenscar_target_info = {
  "ravenscar",
  N_("Ravenscar tasks."),
  N_("Ravenscar tasks support.")
};

struct ravenscar_thread_target final : public target_ops
{
  ravenscar_thread_target ()
    : m_base_ptid (inferior_ptid)
  {
  }

  const target_info &info () const override
  { return ravenscar_target_info; }

  strata stratum () const override { return thread_stratum; }

  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
  void resume (ptid_t, int, enum gdb_signal) override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  void prepare_to_store (struct regcache *) override;

  bool stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool stopped_by_watchpoint () override;

  bool stopped_data_address (CORE_ADDR *) override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  bool thread_alive (ptid_t ptid) override;

  int core_of_thread (ptid_t ptid) override;

  void update_thread_list () override;

  std::string pid_to_str (ptid_t) override;

  ptid_t get_ada_task_ptid (long lwp, ULONGEST thread) override;

  struct btrace_target_info *enable_btrace (thread_info *tp,
					    const struct btrace_config *conf)
    override
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (tp->ptid);
    tp = find_thread_ptid (proc_target, underlying);

    return beneath ()->enable_btrace (tp, conf);
  }

  void mourn_inferior () override;

  void close () override
  {
    delete this;
  }

  thread_info *add_active_thread ();

private:

  /* PTID of the last thread that received an event.
     This can be useful to determine the associated task that received
     the event, to make it the current task.  */
  ptid_t m_base_ptid;

  ptid_t active_task (int cpu);
  bool task_is_currently_active (ptid_t ptid);
  bool runtime_initialized ();
  int get_thread_base_cpu (ptid_t ptid);
  ptid_t get_base_thread_from_ravenscar_task (ptid_t ptid);
  void add_thread (struct ada_task_info *task);

  /* Like switch_to_thread, but uses the base ptid for the thread.  */
  void set_base_thread_from_ravenscar_task (ptid_t ptid)
  {
    process_stratum_target *proc_target
      = as_process_stratum_target (this->beneath ());
    ptid_t underlying = get_base_thread_from_ravenscar_task (ptid);
    switch_to_thread (find_thread_ptid (proc_target, underlying));
  }

  /* Some targets use lazy FPU initialization.  On these, the FP
     registers for a given task might be uninitialized, or stored in
     the per-task context, or simply be the live registers on the CPU.
     This enum is used to encode this information.  */
  enum fpu_state
  {
    /* This target doesn't do anything special for FP registers -- if
       any exist, they are treated just like non-FP
       registers.  */
    NOTHING_SPECIAL,
    /* This target uses the lazy FP scheme, and the FP registers are
       taken from the CPU.  This can happen for any task, because if a
       task switch occurs, the registers aren't immediately written to
       the per-task context -- this is deferred until the current task
       causes an FPU trap.  */
    LIVE_FP_REGISTERS,
    /* This target uses the lazy FP scheme, and the FP registers are
       not available.  Maybe this task never initialized the FPU, or
       maybe GDB couldn't find the required symbol.  */
    NO_FP_REGISTERS
  };

  /* Return the FPU state.  */
  fpu_state get_fpu_state (struct regcache *regcache,
			   const ravenscar_arch_ops *arch_ops);

  /* This maps a TID to the CPU on which it was running.  This is
     needed because sometimes the runtime will report an active task
     that hasn't yet been put on the list of tasks that is read by
     ada-tasks.c.  */
  std::unordered_map<ULONGEST, int> m_cpu_map;
};

/* Return true iff PTID corresponds to a ravenscar task.  */

static bool
is_ravenscar_task (ptid_t ptid)
{
  /* By construction, ravenscar tasks have their LWP set to zero.
     Also make sure that the TID is nonzero, as some remotes, when
     asked for the list of threads, will return the first thread
     as having its TID set to zero.  For instance, TSIM version
     2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
     query, which the remote protocol layer then treats as a thread
     whose TID is 0.  This is obviously not a ravenscar task.  */
  return ptid.lwp () == 0 && ptid.tid () != 0;
}

/* Given PTID, which can be either a ravenscar task or a CPU thread,
   return which CPU that ptid is running on.

   This assumes that PTID is a valid ptid_t.  Otherwise, a gdb_assert
   will be triggered.  */

int
ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid)
{
  int base_cpu;

  if (is_ravenscar_task (ptid))
    {
      /* Prefer to not read inferior memory if possible, to avoid
	 reentrancy problems with xfer_partial.  */
      auto iter = m_cpu_map.find (ptid.tid ());

      if (iter != m_cpu_map.end ())
	base_cpu = iter->second;
      else
	{
	  struct ada_task_info *task_info = ada_get_task_info_from_ptid (ptid);

	  gdb_assert (task_info != NULL);
	  base_cpu = task_info->base_cpu;
	}
    }
  else
    {
      /* We assume that the LWP of the PTID is equal to the CPU number.  */
      base_cpu = ptid.lwp ();
    }

  return base_cpu;
}

/* Given a ravenscar task (identified by its ptid_t PTID), return true
   if this task is the currently active task on the cpu that task is
   running on.

   In other words, this function determines which CPU this task is
   currently running on, and then returns true if the CPU in question
   is executing the code for that task.  If that's the case, then
   that task's registers are in the CPU bank.  Otherwise, the task
   is currently suspended, and its registers have been saved in memory.  */

bool
ravenscar_thread_target::task_is_currently_active (ptid_t ptid)
{
  ptid_t active_task_ptid = active_task (get_thread_base_cpu (ptid));

  return ptid == active_task_ptid;
}

/* Return the CPU thread (as a ptid_t) on which the given ravenscar
   task is running.

   This is the thread that corresponds to the CPU on which the task
   is running.  */

ptid_t
ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid)
{
  int base_cpu;

  if (!is_ravenscar_task (ptid))
    return ptid;

  base_cpu = get_thread_base_cpu (ptid);
  return ptid_t (ptid.pid (), base_cpu);
}

/* Fetch the ravenscar running thread from target memory, make sure
   there's a corresponding thread in the thread list, and return it.
   If the runtime is not initialized, return NULL.  */

thread_info *
ravenscar_thread_target::add_active_thread ()
{
  process_stratum_target *proc_target
    = as_process_stratum_target (this->beneath ());

  int base_cpu;

  gdb_assert (!is_ravenscar_task (m_base_ptid));
  base_cpu = get_thread_base_cpu (m_base_ptid);

  if (!runtime_initialized ())
    return nullptr;

  /* It's possible for runtime_initialized to return true but for the
     runtime not to be fully initialized yet.  For example, this can
     happen for a breakpoint placed at the task's beginning.  */
  ptid_t active_ptid = active_task (base_cpu);
  if (active_ptid == null_ptid)
    return nullptr;

  /* The running thread may not have been added to
     system.tasking.debug's list yet, so ravenscar_update_thread_list
     may not always add it to the thread list.  Add it here.  */
  thread_info *active_thr = find_thread_ptid (proc_target, active_ptid);
  if (active_thr == nullptr)
    {
      active_thr = ::add_thread (proc_target, active_ptid);
      m_cpu_map[active_ptid.tid ()] = base_cpu;
    }
  return active_thr;
}

/* The Ravenscar Runtime exports a symbol which contains the ID of
   the thread that is currently running.  Try to locate that symbol
   and return its associated minimal symbol.
   Return NULL if not found.  */

static struct bound_minimal_symbol
get_running_thread_msymbol ()
{
  struct bound_minimal_symbol msym;

  msym = lookup_minimal_symbol (running_thread_name, NULL, NULL);
  if (!msym.minsym)
    /* Older versions of the GNAT runtime were using a different
       (less ideal) name for the symbol where the active thread ID
       is stored.  If we couldn't find the symbol using the latest
       name, then try the old one.  */
    msym = lookup_minimal_symbol ("running_thread", NULL, NULL);

  return msym;
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application.  */

static bool
has_ravenscar_runtime ()
{
  struct bound_minimal_symbol msym_ravenscar_runtime_initializer
    = lookup_minimal_symbol (ravenscar_runtime_initializer, NULL, NULL);
  struct bound_minimal_symbol msym_known_tasks
    = lookup_minimal_symbol (known_tasks_name, NULL, NULL);
  struct bound_minimal_symbol msym_first_task
    = lookup_minimal_symbol (first_task_name, NULL, NULL);
  struct bound_minimal_symbol msym_running_thread
    = get_running_thread_msymbol ();

  return (msym_ravenscar_runtime_initializer.minsym
	  && (msym_known_tasks.minsym || msym_first_task.minsym)
	  && msym_running_thread.minsym);
}

/* Return True if the Ada Ravenscar run-time can be found in the
   application, and if it has been initialized on target.  */

bool
ravenscar_thread_target::runtime_initialized ()
{
  return active_task (1) != null_ptid;
}

/* Return the ID of the thread that is currently running.
   Return 0 if the ID could not be determined.  */
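
/* The running-thread table is laid out as one pointer-sized entry per
   CPU, indexed from 1.  As an illustrative sketch (with made-up
   addresses): on a 32-bit target where the table is located at
   0x40002000, the entry for CPU 2 would be read from
   0x40002000 + (2 - 1) * 4 == 0x40002004.  */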

static CORE_ADDR
get_running_thread_id (int cpu)
{
  struct bound_minimal_symbol object_msym = get_running_thread_msymbol ();
  int object_size;
  int buf_size;
  gdb_byte *buf;
  CORE_ADDR object_addr;
  struct type *builtin_type_void_data_ptr
    = builtin_type (target_gdbarch ())->builtin_data_ptr;

  if (!object_msym.minsym)
    return 0;

  object_size = builtin_type_void_data_ptr->length ();
  object_addr = (object_msym.value_address ()
		 + (cpu - 1) * object_size);
  buf_size = object_size;
  buf = (gdb_byte *) alloca (buf_size);
  read_memory (object_addr, buf, buf_size);
  return extract_typed_address (buf, builtin_type_void_data_ptr);
}

void
ravenscar_thread_target::resume (ptid_t ptid, int step,
				 enum gdb_signal siggnal)
{
  /* If we see a wildcard resume, we simply pass that on.  Otherwise,
     arrange to resume the base ptid.  */
  inferior_ptid = m_base_ptid;
  if (ptid.is_pid ())
    {
      /* We only have one process, so resume all threads of it.  */
      ptid = minus_one_ptid;
    }
  else if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  beneath ()->resume (ptid, step, siggnal);
}

ptid_t
ravenscar_thread_target::wait (ptid_t ptid,
			       struct target_waitstatus *status,
			       target_wait_flags options)
{
  process_stratum_target *beneath
    = as_process_stratum_target (this->beneath ());
  ptid_t event_ptid;

  if (ptid != minus_one_ptid)
    ptid = m_base_ptid;
  event_ptid = beneath->wait (ptid, status, 0);
  /* Find any new threads that might have been created, and return the
     active thread.

     Only do it if the program is still alive, though.  Otherwise,
     this causes problems when debugging through the remote protocol,
     because we might try switching threads (and thus sending packets)
     after the remote has disconnected.  */
  if (status->kind () != TARGET_WAITKIND_EXITED
      && status->kind () != TARGET_WAITKIND_SIGNALLED
      && runtime_initialized ())
    {
      m_base_ptid = event_ptid;
      this->update_thread_list ();
      thread_info *thr = this->add_active_thread ();
      if (thr != nullptr)
	return thr->ptid;
    }
  return event_ptid;
}

/* Add the thread associated to the given TASK to the thread list
   (if the thread has already been added, this is a no-op).  */

void
ravenscar_thread_target::add_thread (struct ada_task_info *task)
{
  if (find_thread_ptid (current_inferior (), task->ptid) == NULL)
    {
      ::add_thread (current_inferior ()->process_target (), task->ptid);
      m_cpu_map[task->ptid.tid ()] = task->base_cpu;
    }
}

void
ravenscar_thread_target::update_thread_list ()
{
  /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
     but this isn't always the case in target methods.  So, we ensure
     it here.  */
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid,
						  m_base_ptid);

  /* Do not clear the thread list before adding the Ada tasks, in
     order to preserve the thread that the process stratum has already
     added (m_base_ptid), as well as the running thread, which may not
     have been added to system.tasking.debug's list yet.  */

  iterate_over_live_ada_tasks ([=] (struct ada_task_info *task)
			       {
				 this->add_thread (task);
			       });
}

ptid_t
ravenscar_thread_target::active_task (int cpu)
{
  CORE_ADDR tid = get_running_thread_id (cpu);

  if (tid == 0)
    return null_ptid;
  else
    return ptid_t (m_base_ptid.pid (), 0, tid);
}

bool
ravenscar_thread_target::thread_alive (ptid_t ptid)
{
  /* Ravenscar tasks are non-terminating.  */
  return true;
}

std::string
ravenscar_thread_target::pid_to_str (ptid_t ptid)
{
  if (!is_ravenscar_task (ptid))
    return beneath ()->pid_to_str (ptid);

  return string_printf ("Ravenscar Thread 0x%s",
			phex_nz (ptid.tid (), sizeof (ULONGEST)))	;
}
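
/* For instance, a task whose TID is 0x2ff001a8 would be printed as
   "Ravenscar Thread 0x2ff001a8" (the value here is illustrative
   only; phex_nz omits leading zeroes).  */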

CORE_ADDR
ravenscar_arch_ops::get_stack_base (struct regcache *regcache) const
{
  struct gdbarch *gdbarch = regcache->arch ();
  const int sp_regnum = gdbarch_sp_regnum (gdbarch);
  ULONGEST stack_address;
  regcache_cooked_read_unsigned (regcache, sp_regnum, &stack_address);
  return (CORE_ADDR) stack_address;
}

void
ravenscar_arch_ops::supply_one_register (struct regcache *regcache,
					 int regnum,
					 CORE_ADDR descriptor,
					 CORE_ADDR stack_base) const
{
  CORE_ADDR addr;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    addr = stack_base;
  else
    addr = descriptor;
  addr += offsets[regnum];

  struct gdbarch *gdbarch = regcache->arch ();
  int size = register_size (gdbarch, regnum);
  gdb_byte *buf = (gdb_byte *) alloca (size);
  read_memory (addr, buf, size);
  regcache->raw_supply (regnum, buf);
}

void
ravenscar_arch_ops::fetch_register (struct regcache *regcache,
				    int regnum) const
{
  gdb_assert (regnum != -1);

  struct gdbarch *gdbarch = regcache->arch ();
  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();

  int sp_regno = -1;
  CORE_ADDR stack_address = 0;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    {
      /* We must supply SP for get_stack_base, so recurse.  */
      sp_regno = gdbarch_sp_regnum (gdbarch);
      gdb_assert (!(sp_regno >= first_stack_register
		    && sp_regno <= last_stack_register));
      fetch_register (regcache, sp_regno);
      stack_address = get_stack_base (regcache);
    }

  if (regnum < offsets.size () && offsets[regnum] != -1)
    supply_one_register (regcache, regnum, thread_descriptor_address,
			 stack_address);
}

void
ravenscar_arch_ops::store_one_register (struct regcache *regcache, int regnum,
					CORE_ADDR descriptor,
					CORE_ADDR stack_base) const
{
  CORE_ADDR addr;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    addr = stack_base;
  else
    addr = descriptor;
  addr += offsets[regnum];

  struct gdbarch *gdbarch = regcache->arch ();
  int size = register_size (gdbarch, regnum);
  gdb_byte *buf = (gdb_byte *) alloca (size);
  regcache->raw_collect (regnum, buf);
  write_memory (addr, buf, size);
}

void
ravenscar_arch_ops::store_register (struct regcache *regcache,
				    int regnum) const
{
  gdb_assert (regnum != -1);

  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();

  CORE_ADDR stack_address = 0;
  if (regnum >= first_stack_register && regnum <= last_stack_register)
    stack_address = get_stack_base (regcache);

  if (regnum < offsets.size () && offsets[regnum] != -1)
    store_one_register (regcache, regnum, thread_descriptor_address,
			stack_address);
}

/* Temporarily set the ptid of a regcache to some other value.  When
   this object is destroyed, the regcache's original ptid is
   restored.  */

class temporarily_change_regcache_ptid
{
public:

  temporarily_change_regcache_ptid (struct regcache *regcache, ptid_t new_ptid)
    : m_regcache (regcache),
      m_save_ptid (regcache->ptid ())
  {
    m_regcache->set_ptid (new_ptid);
  }

  ~temporarily_change_regcache_ptid ()
  {
    m_regcache->set_ptid (m_save_ptid);
  }

private:

  /* The regcache.  */
  struct regcache *m_regcache;
  /* The saved ptid.  */
  ptid_t m_save_ptid;
};
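
/* A minimal usage sketch (illustrative only): to read a register via
   the CPU thread underlying a task, one would write something like

     {
       temporarily_change_regcache_ptid changer (regcache, base_ptid);
       beneath ()->fetch_registers (regcache, regnum);
     }

   where the destructor restores the regcache's original ptid when
   the scope is left, even if an exception propagates.  */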

ravenscar_thread_target::fpu_state
ravenscar_thread_target::get_fpu_state (struct regcache *regcache,
					const ravenscar_arch_ops *arch_ops)
{
  /* We want to indicate whether any special FP register handling is
     needed.  If this target doesn't have lazy FP, then no special
     treatment is ever needed.  */
  if (!arch_ops->on_demand_fp ())
    return NOTHING_SPECIAL;

  bound_minimal_symbol fpu_context
    = lookup_minimal_symbol ("system__bb__cpu_primitives__current_fpu_context",
			     nullptr, nullptr);
  /* If the symbol can't be found, just fall back.  */
  if (fpu_context.minsym == nullptr)
    return NO_FP_REGISTERS;

  struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  ptr_type = lookup_pointer_type (ptr_type);
  value *val = value_from_pointer (ptr_type, fpu_context.value_address ());

  int cpu = get_thread_base_cpu (regcache->ptid ());
  /* The array index type has a lower bound of 1 -- it is Ada code --
     so subtract 1 here.  */
  val = value_ptradd (val, cpu - 1);

  val = value_ind (val);
  CORE_ADDR fpu_task = value_as_long (val);

  /* The tid is the thread_id field, which is a pointer to the thread.  */
  CORE_ADDR thread_descriptor_address
    = (CORE_ADDR) regcache->ptid ().tid ();
  if (fpu_task == (thread_descriptor_address
		   + arch_ops->get_fpu_context_offset ()))
    return LIVE_FP_REGISTERS;

  int v_init_offset = arch_ops->get_v_init_offset ();
  gdb_byte init = 0;
  read_memory (thread_descriptor_address + v_init_offset, &init, 1);
  return init ? NOTHING_SPECIAL : NO_FP_REGISTERS;
}
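
/* To summarize where fetch_registers and store_registers below take
   each register from (this table is explanatory, not from the
   original sources):

     register kind   FPU state          task active?   source
     -------------   -----------------  ------------   ------------------
     non-FP          any                yes            target beneath
     non-FP          any                no             saved task context
     FP              NO_FP_REGISTERS    either         skipped
     FP              LIVE_FP_REGISTERS  either         target beneath
     FP              NOTHING_SPECIAL    yes            target beneath
     FP              NOTHING_SPECIAL    no             saved task context  */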

void
ravenscar_thread_target::fetch_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      bool is_active = task_is_currently_active (ptid);
      struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
      gdb::optional<fpu_state> fp_state;

      int low_reg = regnum == -1 ? 0 : regnum;
      int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;

      ptid_t base = get_base_thread_from_ravenscar_task (ptid);
      for (int i = low_reg; i < high_reg; ++i)
	{
	  bool use_beneath = false;
	  if (arch_ops->is_fp_register (i))
	    {
	      if (!fp_state.has_value ())
		fp_state = get_fpu_state (regcache, arch_ops);
	      if (*fp_state == NO_FP_REGISTERS)
		continue;
	      if (*fp_state == LIVE_FP_REGISTERS
		  || (is_active && *fp_state == NOTHING_SPECIAL))
		use_beneath = true;
	    }
	  else
	    use_beneath = is_active;

	  if (use_beneath)
	    {
	      temporarily_change_regcache_ptid changer (regcache, base);
	      beneath ()->fetch_registers (regcache, i);
	    }
	  else
	    arch_ops->fetch_register (regcache, i);
	}
    }
  else
    beneath ()->fetch_registers (regcache, regnum);
}

void
ravenscar_thread_target::store_registers (struct regcache *regcache,
					  int regnum)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      struct gdbarch *gdbarch = regcache->arch ();
      bool is_active = task_is_currently_active (ptid);
      struct ravenscar_arch_ops *arch_ops = gdbarch_ravenscar_ops (gdbarch);
      gdb::optional<fpu_state> fp_state;

      int low_reg = regnum == -1 ? 0 : regnum;
      int high_reg = regnum == -1 ? gdbarch_num_regs (gdbarch) : regnum + 1;

      ptid_t base = get_base_thread_from_ravenscar_task (ptid);
      for (int i = low_reg; i < high_reg; ++i)
	{
	  bool use_beneath = false;
	  if (arch_ops->is_fp_register (i))
	    {
	      if (!fp_state.has_value ())
		fp_state = get_fpu_state (regcache, arch_ops);
	      if (*fp_state == NO_FP_REGISTERS)
		continue;
	      if (*fp_state == LIVE_FP_REGISTERS
		  || (is_active && *fp_state == NOTHING_SPECIAL))
		use_beneath = true;
	    }
	  else
	    use_beneath = is_active;

	  if (use_beneath)
	    {
	      temporarily_change_regcache_ptid changer (regcache, base);
	      beneath ()->store_registers (regcache, i);
	    }
	  else
	    arch_ops->store_register (regcache, i);
	}
    }
  else
    beneath ()->store_registers (regcache, regnum);
}

void
ravenscar_thread_target::prepare_to_store (struct regcache *regcache)
{
  ptid_t ptid = regcache->ptid ();

  if (runtime_initialized () && is_ravenscar_task (ptid))
    {
      if (task_is_currently_active (ptid))
	{
	  ptid_t base = get_base_thread_from_ravenscar_task (ptid);
	  temporarily_change_regcache_ptid changer (regcache, base);
	  beneath ()->prepare_to_store (regcache);
	}
      else
	{
	  /* Nothing.  */
	}
    }
  else
    beneath ()->prepare_to_store (regcache);
}

/* Implement the to_stopped_by_sw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_sw_breakpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_sw_breakpoint ();
}

/* Implement the to_stopped_by_hw_breakpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_hw_breakpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_hw_breakpoint ();
}

/* Implement the to_stopped_by_watchpoint target_ops "method".  */

bool
ravenscar_thread_target::stopped_by_watchpoint ()
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_by_watchpoint ();
}

/* Implement the to_stopped_data_address target_ops "method".  */

bool
ravenscar_thread_target::stopped_data_address (CORE_ADDR *addr_p)
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->stopped_data_address (addr_p);
}

void
ravenscar_thread_target::mourn_inferior ()
{
  m_base_ptid = null_ptid;
  target_ops *beneath = this->beneath ();
  current_inferior ()->unpush_target (this);
  beneath->mourn_inferior ();
}

/* Implement the to_core_of_thread target_ops "method".  */

int
ravenscar_thread_target::core_of_thread (ptid_t ptid)
{
  scoped_restore_current_thread saver;
  set_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->core_of_thread (inferior_ptid);
}

/* Implement the target xfer_partial method.  */

enum target_xfer_status
ravenscar_thread_target::xfer_partial (enum target_object object,
				       const char *annex,
				       gdb_byte *readbuf,
				       const gdb_byte *writebuf,
				       ULONGEST offset, ULONGEST len,
				       ULONGEST *xfered_len)
{
  scoped_restore save_ptid = make_scoped_restore (&inferior_ptid);
  /* Calling get_base_thread_from_ravenscar_task can read memory from
     the inferior.  However, that function is written to prefer our
     internal map, so it should not result in recursive calls in
     practice.  */
  inferior_ptid = get_base_thread_from_ravenscar_task (inferior_ptid);
  return beneath ()->xfer_partial (object, annex, readbuf, writebuf,
				   offset, len, xfered_len);
}

/* Observer on inferior_created: push ravenscar thread stratum if needed.  */

static void
ravenscar_inferior_created (inferior *inf)
{
  const char *err_msg;

  if (!ravenscar_task_support
      || gdbarch_ravenscar_ops (target_gdbarch ()) == NULL
      || !has_ravenscar_runtime ())
    return;

  err_msg = ada_get_tcb_types_info ();
  if (err_msg != NULL)
    {
      warning (_("%s. Task/thread support disabled."), err_msg);
      return;
    }

  ravenscar_thread_target *rtarget = new ravenscar_thread_target ();
  inf->push_target (target_ops_up (rtarget));
  thread_info *thr = rtarget->add_active_thread ();
  if (thr != nullptr)
    switch_to_thread (thr);
}

ptid_t
ravenscar_thread_target::get_ada_task_ptid (long lwp, ULONGEST thread)
{
  return ptid_t (m_base_ptid.pid (), 0, thread);
}

/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;

/* Implement the "show ravenscar task-switching" command.  */

static void
show_ravenscar_task_switching_command (struct ui_file *file, int from_tty,
				       struct cmd_list_element *c,
				       const char *value)
{
  if (ravenscar_task_support)
    gdb_printf (file, _("\
Support for Ravenscar task/thread switching is enabled\n"));
  else
    gdb_printf (file, _("\
Support for Ravenscar task/thread switching is disabled\n"));
}

/* Module startup initialization function, automagically called by
   init.c.  */

void _initialize_ravenscar ();
void
_initialize_ravenscar ()
{
  /* Notice when the inferior is created in order to push the
     ravenscar ops if needed.  */
  gdb::observers::inferior_created.attach (ravenscar_inferior_created,
					   "ravenscar-thread");

  add_setshow_prefix_cmd
    ("ravenscar", no_class,
     _("Prefix command for changing Ravenscar-specific settings."),
     _("Prefix command for showing Ravenscar-specific settings."),
     &set_ravenscar_list, &show_ravenscar_list,
     &setlist, &showlist);

  add_setshow_boolean_cmd ("task-switching", class_obscure,
			   &ravenscar_task_support, _("\
Enable or disable support for GNAT Ravenscar tasks."), _("\
Show whether support for GNAT Ravenscar tasks is enabled."),
			   _("\
Enable or disable support for task/thread switching with the GNAT\n\
Ravenscar run-time library for bareboard configuration."),
			   NULL, show_ravenscar_task_switching_command,
			   &set_ravenscar_list, &show_ravenscar_list);
}
949