xref: /netbsd-src/external/gpl3/gdb/dist/gdbserver/linux-low.cc (revision 64f917f5a88990e32dd65fcd4348042fa7f852b9)
1 /* Low level interface to ptrace, for the remote server for GDB.
2    Copyright (C) 1995-2024 Free Software Foundation, Inc.
3 
4    This file is part of GDB.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18 
19 #include "linux-low.h"
20 #include "nat/linux-osdata.h"
21 #include "gdbsupport/agent.h"
22 #include "tdesc.h"
23 #include "gdbsupport/event-loop.h"
24 #include "gdbsupport/event-pipe.h"
25 #include "gdbsupport/rsp-low.h"
26 #include "gdbsupport/signals-state-save-restore.h"
27 #include "nat/linux-nat.h"
28 #include "nat/linux-waitpid.h"
29 #include "gdbsupport/gdb_wait.h"
30 #include "nat/gdb_ptrace.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include <signal.h>
35 #include <sys/ioctl.h>
36 #include <fcntl.h>
37 #include <unistd.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include <langinfo.h>
47 #include <iconv.h>
48 #include "gdbsupport/filestuff.h"
49 #include "gdbsupport/gdb-safe-ctype.h"
50 #include "tracepoint.h"
51 #include <inttypes.h>
52 #include "gdbsupport/common-inferior.h"
53 #include "nat/fork-inferior.h"
54 #include "gdbsupport/environ.h"
55 #include "gdbsupport/gdb-sigmask.h"
56 #include "gdbsupport/scoped_restore.h"
57 #ifndef ELFMAG0
58 /* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
59    then ELFMAG0 will have been defined.  If it didn't get included by
60    gdb_proc_service.h then including it will likely introduce a duplicate
61    definition of elf_fpregset_t.  */
62 #include <elf.h>
63 #endif
64 #include "nat/linux-namespaces.h"
65 
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
69 
70 #ifndef AT_HWCAP2
71 #define AT_HWCAP2 26
72 #endif
73 
74 /* Some targets did not define these ptrace constants from the start,
75    so gdbserver defines them locally here.  In the future, these may
76    be removed after they are added to asm/ptrace.h.  */
77 #if !(defined(PT_TEXT_ADDR) \
78       || defined(PT_DATA_ADDR) \
79       || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels.  */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR  51*4
85 /* These are still undefined in 3.10 kernels.  */
86 #elif defined(__TMS320C6X__)
87 #define PT_TEXT_ADDR     (0x10000*4)
88 #define PT_DATA_ADDR     (0x10004*4)
89 #define PT_TEXT_END_ADDR (0x10008*4)
90 #endif
91 #endif
92 
93 #if (defined(__UCLIBC__)		\
94      && defined(HAS_NOMMU)		\
95      && defined(PT_TEXT_ADDR)		\
96      && defined(PT_DATA_ADDR)		\
97      && defined(PT_TEXT_END_ADDR))
98 #define SUPPORTS_READ_OFFSETS
99 #endif
100 
101 #ifdef HAVE_LINUX_BTRACE
102 # include "nat/linux-btrace.h"
103 # include "gdbsupport/btrace-common.h"
104 #endif
105 
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition for libcs whose
   elf.h does not provide Elf32_auxv_t.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
120 
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  Fallback definition for libcs whose
   elf.h does not provide Elf64_auxv_t.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
135 
/* Does the current host support PTRACE_GETREGSET?  Starts out as
   TRIBOOL_UNKNOWN and is resolved at runtime.  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
138 
139 /* Return TRUE if THREAD is the leader thread of the process.  */
140 
141 static bool
142 is_leader (thread_info *thread)
143 {
144   ptid_t ptid = ptid_of (thread);
145   return ptid.pid () == ptid.lwp ();
146 }
147 
148 /* Return true if we should report thread exit events to GDB, for
149    THR.  */
150 
151 static bool
152 report_exit_events_for (thread_info *thr)
153 {
154   client_state &cs = get_client_state ();
155 
156   return (cs.report_thread_events
157 	  || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
158 }
159 
160 /* LWP accessors.  */
161 
/* See nat/linux-nat.h.  Return the ptid of the thread backing LWP.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}
169 
/* See nat/linux-nat.h.  Store INFO as LWP's architecture-specific
   per-LWP data.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
178 
/* See nat/linux-nat.h.  Return LWP's architecture-specific per-LWP
   data.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
186 
/* See nat/linux-nat.h.  Return non-zero if LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
194 
/* See nat/linux-nat.h.  Return the reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
202 
/* See nat/linux-nat.h.  Return non-zero if LWP is being
   single-stepped.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}
210 
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
/* Head of the list; entries are pushed by add_to_pid_list and popped
   by pull_pid_from_list.  */
static struct simple_pid_list *stopped_pids;
227 
228 /* Trivial list manipulation functions to keep track of a list of new
229    stopped processes.  */
230 
231 static void
232 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
233 {
234   struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
235 
236   new_pid->pid = pid;
237   new_pid->status = status;
238   new_pid->next = *listp;
239   *listp = new_pid;
240 }
241 
242 static int
243 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
244 {
245   struct simple_pid_list **p;
246 
247   for (p = listp; *p != NULL; p = &(*p)->next)
248     if ((*p)->pid == pid)
249       {
250 	struct simple_pid_list *next = (*p)->next;
251 
252 	*statusp = (*p)->status;
253 	xfree (*p);
254 	*p = next;
255 	return 1;
256       }
257   return 0;
258 }
259 
/* The possible states of the all-stop machinery.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* Forward declarations of helpers defined later in this file.  */
static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
			   bool thread_event);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;
294 
/* Default implementation: no breakpoint support.  Architecture
   backends that support fetching/adjusting the PC override this to
   return true.  */

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}
300 
/* Default implementation; only meaningful when low_supports_breakpoints
   returns true, which backends providing a real low_get_pc do.  */

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}
306 
/* Default implementation; must be overridden by backends that report
   low_supports_breakpoints as true.  */

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
312 
/* Default implementation; must be overridden by backends that support
   software single-step.  */

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
319 
/* Number of bytes the PC advances past a software breakpoint on this
   architecture.  Default is zero; backends override as needed.  */

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}
325 
326 /* True if LWP is stopped in its stepping range.  */
327 
328 static int
329 lwp_in_step_range (struct lwp_info *lwp)
330 {
331   CORE_ADDR pc = lwp->stop_pc;
332 
333   return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
334 }
335 
/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe.is_open ())

/* Ask LWP to stop with SIGSTOP (defined later in this file).  */
static void send_sigstop (struct lwp_info *lwp);
343 
/* Inspect HEADER.  If it is a valid ELF header, set *MACHINE to its
   e_machine field and return 1 if it is 64-bit (ELFCLASS64), 0 if it
   is not.  If the ELF magic is absent, set *MACHINE to EM_NONE and
   return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Guard clause: reject anything without the ELF magic bytes.  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
361 
362 /* Return non-zero if FILE is a 64-bit ELF file,
363    zero if the file is not a 64-bit ELF file,
364    and -1 if the file is not accessible or doesn't exist.  */
365 
366 static int
367 elf_64_file_p (const char *file, unsigned int *machine)
368 {
369   Elf64_Ehdr header;
370   int fd;
371 
372   fd = open (file, O_RDONLY);
373   if (fd < 0)
374     return -1;
375 
376   if (read (fd, &header, sizeof (header)) != sizeof (header))
377     {
378       close (fd);
379       return 0;
380     }
381   close (fd);
382 
383   return elf_64_header_p (&header, machine);
384 }
385 
386 /* Accepts an integer PID; Returns true if the executable PID is
387    running is a 64-bit ELF file..  */
388 
389 int
390 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
391 {
392   char file[PATH_MAX];
393 
394   sprintf (file, "/proc/%d/exe", pid);
395   return elf_64_file_p (file, machine);
396 }
397 
398 void
399 linux_process_target::delete_lwp (lwp_info *lwp)
400 {
401   struct thread_info *thr = get_lwp_thread (lwp);
402 
403   threads_debug_printf ("deleting %ld", lwpid_of (thr));
404 
405   remove_thread (thr);
406 
407   low_delete_thread (lwp->arch_private);
408 
409   delete lwp;
410 }
411 
/* Release the architecture-specific per-thread data INFO.  */

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
419 
420 /* Open the /proc/PID/mem file for PROC.  */
421 
422 static void
423 open_proc_mem_file (process_info *proc)
424 {
425   gdb_assert (proc->priv->mem_fd == -1);
426 
427   char filename[64];
428   xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
429 
430   proc->priv->mem_fd
431     = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
432 }
433 
434 process_info *
435 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
436 {
437   struct process_info *proc;
438 
439   proc = add_process (pid, attached);
440   proc->priv = XCNEW (struct process_info_private);
441 
442   proc->priv->arch_private = low_new_process ();
443   proc->priv->mem_fd = -1;
444 
445   return proc;
446 }
447 
448 
449 process_info *
450 linux_process_target::add_linux_process (int pid, int attached)
451 {
452   process_info *proc = add_linux_process_no_mem_file (pid, attached);
453   open_proc_mem_file (proc);
454   return proc;
455 }
456 
457 void
458 linux_process_target::remove_linux_process (process_info *proc)
459 {
460   if (proc->priv->mem_fd >= 0)
461     close (proc->priv->mem_fd);
462 
463   this->low_delete_process (proc->priv->arch_private);
464 
465   xfree (proc->priv);
466   proc->priv = nullptr;
467 
468   remove_process (proc);
469 }
470 
/* Allocate architecture-specific process data.  Default: none.  */

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}
476 
/* Release the architecture-specific process data INFO.  */

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}
484 
/* Hook called when CHILD is forked off PARENT, so backends can copy
   architecture-specific process data.  Default does nothing.  */

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}
490 
491 void
492 linux_process_target::arch_setup_thread (thread_info *thread)
493 {
494   scoped_restore_current_thread restore_thread;
495   switch_to_thread (thread);
496 
497   low_arch_setup ();
498 }
499 
/* Handle the extended ptrace event encoded in WSTAT
   (PTRACE_EVENT_FORK/VFORK/CLONE/VFORK_DONE/EXEC) reported for
   *ORIG_EVENT_LWP.  Return 0 if the event should be reported to GDB
   (the exec case replaces *ORIG_EVENT_LWP with the post-exec LWP), or
   1 if the event should be suppressed (a clone GDB didn't ask to hear
   about).  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.
	 Either branch leaves STATUS holding the child's initial wait
	 status, used further below.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	{
	  debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
			(event == PTRACE_EVENT_FORK ? "fork"
			 : event == PTRACE_EVENT_VFORK ? "vfork"
			 : event == PTRACE_EVENT_CLONE ? "clone"
			 : "???"),
			ptid_of (event_thr).lwp (),
			new_pid);
	}

      /* fork/vfork children get their own pid; clones share the
	 parent's pid and only differ in the lwp.  */
      ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
			   ? ptid_t (new_pid, new_pid)
			   : ptid_t (ptid_of (event_thr).pid (), new_pid));

      process_info *child_proc = nullptr;

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Add the new process to the tables before we add the LWP.
	     We need to do this even if the new process will be
	     detached.  See breakpoint cloning code further below.  */
	  child_proc = add_linux_process (new_pid, 0);
	}

      lwp_info *child_lwp = add_lwp (child_ptid);
      gdb_assert (child_lwp != NULL);
      child_lwp->stopped = 1;
      if (event != PTRACE_EVENT_CLONE)
	child_lwp->must_set_ptrace_flags = 1;
      child_lwp->status_pending_p = 0;

      thread_info *child_thr = get_lwp_thread (child_lwp);

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	{
	  threads_debug_printf ("leaving child suspended");
	  child_lwp->suspended = 1;
	}

      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ()
	  && event == PTRACE_EVENT_VFORK)
	{
	  /* If we leave single-step breakpoints there, child will
	     hit it, so uninsert single-step breakpoints from parent
	     (and child).  Once vfork child is done, reinsert
	     them back to parent.  */
	  uninsert_single_step_breakpoints (event_thr);
	}

      if (event != PTRACE_EVENT_CLONE)
	{
	  /* Clone the breakpoint lists of the parent.  We need to do
	     this even if the new process will be detached, since we
	     will need the process object and the breakpoints to
	     remove any breakpoints from memory when we detach, and
	     the client side will access registers.  */
	  gdb_assert (child_proc != NULL);

	  process_info *parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);
	}

      /* Save fork/clone info in the parent thread.  */
      if (event == PTRACE_EVENT_FORK)
	event_lwp->waitstatus.set_forked (child_ptid);
      else if (event == PTRACE_EVENT_VFORK)
	event_lwp->waitstatus.set_vforked (child_ptid);
      else if (event == PTRACE_EVENT_CLONE
	       && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	event_lwp->waitstatus.set_thread_cloned (child_ptid);

      if (event != PTRACE_EVENT_CLONE
	  || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
	{
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled, the
	     handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent's event is passed on to
	     GDB.  */
	  event_lwp->relative = child_lwp;
	  child_lwp->relative = event_lwp;
	}

      /* If the parent thread is doing step-over with single-step
	 breakpoints, the list of single-step breakpoints are cloned
	 from the parent's.  Remove them from the child process.
	 In case of vfork, we'll reinsert them back once vforked
	 child is done.  */
      if (event_lwp->bp_reinsert != 0
	  && supports_software_single_step ())
	{
	  /* The child process is forked and stopped, so it is safe
	     to access its memory without stopping all other threads
	     from other processes.  */
	  delete_single_step_breakpoints (child_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	  gdb_assert (!has_single_step_breakpoints (child_thr));
	}

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  child_lwp->stop_expected = 1;
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}
      else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
	{
	  child_lwp->waitstatus.set_thread_created ();
	  child_lwp->status_pending_p = 1;
	  child_lwp->status_pending = status;
	}

      if (event == PTRACE_EVENT_CLONE)
	{
#ifdef USE_THREAD_DB
	  thread_db_notice_clone (event_thr, child_ptid);
#endif
	}

      if (event == PTRACE_EVENT_CLONE
	  && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
	{
	  /* GDB didn't enable clone reporting for this thread;
	     swallow the event.  */
	  threads_debug_printf
	    ("not reporting clone event from LWP %ld, new child is %ld\n",
	     ptid_of (event_thr).lwp (),
	     new_pid);
	  return 1;
	}

      /* Leave the child stopped until GDB processes the parent
	 event.  */
      child_thr->last_resume_kind = resume_stop;
      child_thr->last_status.set_stopped (GDB_SIGNAL_0);

      /* Report the event.  */
      threads_debug_printf
	("reporting %s event from LWP %ld, new child is %ld\n",
	 (event == PTRACE_EVENT_FORK ? "fork"
	  : event == PTRACE_EVENT_VFORK ? "vfork"
	  : event == PTRACE_EVENT_CLONE ? "clone"
	  : "???"),
	 ptid_of (event_thr).lwp (),
	 new_pid);
      return 0;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* Restore the single-step breakpoints uninserted when the
	     vfork child was spawned (see the vfork case above).  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	   (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
780 
781 CORE_ADDR
782 linux_process_target::get_pc (lwp_info *lwp)
783 {
784   process_info *proc = get_thread_process (get_lwp_thread (lwp));
785   gdb_assert (!proc->starting_up);
786 
787   if (!low_supports_breakpoints ())
788     return 0;
789 
790   scoped_restore_current_thread restore_thread;
791   switch_to_thread (get_lwp_thread (lwp));
792 
793   struct regcache *regcache = get_thread_regcache (current_thread, 1);
794   CORE_ADDR pc = low_get_pc (regcache);
795 
796   threads_debug_printf ("pc is 0x%lx", (long) pc);
797 
798   return pc;
799 }
800 
801 void
802 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
803 {
804   struct regcache *regcache;
805 
806   scoped_restore_current_thread restore_thread;
807   switch_to_thread (get_lwp_thread (lwp));
808 
809   regcache = get_thread_regcache (current_thread, 1);
810   low_get_syscall_trapinfo (regcache, sysno);
811 
812   threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
813 }
814 
/* Default implementation; backends that support syscall catchpoints
   override this to extract the syscall number from REGCACHE.  */

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}
821 
/* Determine why LWP stopped and record it in lwp->stop_reason, along
   with the (possibly adjusted) stop PC in lwp->stop_pc.  Uses
   PTRACE_GETSIGINFO's si_code to tell software breakpoints, hardware
   breakpoints/watchpoints and single-step traps apart, consulting the
   debug registers when si_code alone is ambiguous.  Returns false
   only when the target has no breakpoint support at all.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  siginfo_t siginfo;

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
	 fetch it itself.  */
      return true;
    }

  /* Where the PC would be after backing up over a software
     breakpoint, on targets where the PC advances past it.  */
  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
922 
923 lwp_info *
924 linux_process_target::add_lwp (ptid_t ptid)
925 {
926   lwp_info *lwp = new lwp_info;
927 
928   lwp->thread = add_thread (ptid, lwp);
929 
930   low_new_thread (lwp);
931 
932   return lwp;
933 }
934 
/* Hook called when a new LWP is added; backends override this to set
   up architecture-specific per-thread state.  Default does nothing.  */

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}
940 
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, before it execs the program.  */

static void
linux_ptrace_fun ()
{
  /* Make this process traceable by its parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open reuses the lowest free descriptor, i.e. fd 0 just
	 closed, so this makes /dev/null the new stdin.  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
972 
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Optionally disable ASLR for the child; restored when this scope
       exits.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}
1015 
1016 /* Implement the post_create_inferior target_ops method.  */
1017 
1018 void
1019 linux_process_target::post_create_inferior ()
1020 {
1021   struct lwp_info *lwp = get_thread_lwp (current_thread);
1022 
1023   low_arch_setup ();
1024 
1025   if (lwp->must_set_ptrace_flags)
1026     {
1027       struct process_info *proc = current_process ();
1028       int options = linux_low_ptrace_options (proc->attached);
1029 
1030       linux_enable_event_reporting (lwpid_of (current_thread), options);
1031       lwp->must_set_ptrace_flags = 0;
1032     }
1033 }
1034 
/* Attach to the LWP specified by PTID.  Returns 0 on success, or an
   errno value from PTRACE_ATTACH on failure.  On success, a new lwp
   entry is added with a SIGSTOP expected.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1115 
1116 /* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
1117    already attached.  Returns true if a new LWP is found, false
1118    otherwise.  */
1119 
1120 static int
1121 attach_proc_task_lwp_callback (ptid_t ptid)
1122 {
1123   /* Is this a new thread?  */
1124   if (find_thread_ptid (ptid) == NULL)
1125     {
1126       int lwpid = ptid.lwp ();
1127       int err;
1128 
1129       threads_debug_printf ("Found new lwp %d", lwpid);
1130 
1131       err = the_linux_target->attach_lwp (ptid);
1132 
1133       /* Be quiet if we simply raced with the thread exiting.  EPERM
1134 	 is returned if the thread's task still exists, and is marked
1135 	 as exited or zombie, as well as other conditions, so in that
1136 	 case, confirm the status in /proc/PID/status.  */
1137       if (err == ESRCH
1138 	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1139 	threads_debug_printf
1140 	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1141 	   lwpid, err, safe_strerror (err));
1142       else if (err != 0)
1143 	{
1144 	  std::string reason
1145 	    = linux_ptrace_attach_fail_reason_string (ptid, err);
1146 
1147 	  error (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1148 	}
1149 
1150       return 1;
1151     }
1152   return 0;
1153 }
1154 
1155 static void async_file_mark (void);
1156 
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; raises an error if the
   initial attach fails.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Roll back the process entry added above before reporting the
	 failure.  */
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Make sure we do not deliver the SIGSTOP to the process.  */
      initial_thread->last_resume_kind = resume_continue;

      /* Undo the partial attach before re-raising.  */
      this->detach (proc);
      throw;
    }

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      /* If the first stop wasn't the expected SIGSTOP, keep it
	 pending so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1246 
1247 static int
1248 last_thread_of_process_p (int pid)
1249 {
1250   bool seen_one = false;
1251 
1252   thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1253     {
1254       if (!seen_one)
1255 	{
1256 	  /* This is the first thread of this process we see.  */
1257 	  seen_one = true;
1258 	  return false;
1259 	}
1260       else
1261 	{
1262 	  /* This is the second thread of this process we see.  */
1263 	  return true;
1264 	}
1265     });
1266 
1267   return thread == NULL;
1268 }
1269 
/* Kill LWP, using both SIGKILL and PTRACE_KILL.  Errors from either
   are deliberately ignored (only logged); the caller is expected to
   reap the lwp and notice failures there.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1313 
/* Kill LWP and wait for it to die.  Loops sending the kill and
   reaping wait statuses until the lwp is actually reaped (or it turns
   out to have already disappeared).  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1355 
1356 /* Callback for `for_each_thread'.  Kills an lwp of a given process,
1357    except the leader.  */
1358 
1359 static void
1360 kill_one_lwp_callback (thread_info *thread, int pid)
1361 {
1362   struct lwp_info *lwp = get_thread_lwp (thread);
1363 
1364   /* We avoid killing the first thread here, because of a Linux kernel (at
1365      least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1366      the children get a chance to be reaped, it will remain a zombie
1367      forever.  */
1368 
1369   if (lwpid_of (thread) == pid)
1370     {
1371       threads_debug_printf ("is last of process %s",
1372 			    target_pid_to_str (thread->id).c_str ());
1373       return;
1374     }
1375 
1376   kill_wait_lwp (lwp);
1377 }
1378 
/* Implement the kill target_ops method.  Kills all lwps of PROCESS
   (non-leaders first, then the leader) and mourns the process.
   Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1409 
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to deliver, or 0 for none.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Prefer a stop that gdbserver hasn't reported to GDB yet.  */
  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal settings, if it told us any.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it. "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1485 
/* Detach from LWP.  Discards any pending SIGSTOP, re-delivers any
   other pending signal via PTRACE_DETACH so it isn't lost, and
   deletes the lwp from our list.  If the lwp turned into a zombie
   meanwhile, it is reaped instead.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}
1564 
/* Implement the detach target_ops method.  Detaches from every lwp of
   PROCESS (non-leaders first, then the leader) and mourns the
   process.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1614 
1615 /* Remove all LWPs that belong to process PROC from the lwp list.  */
1616 
1617 void
1618 linux_process_target::mourn (process_info *process)
1619 {
1620 #ifdef USE_THREAD_DB
1621   thread_db_mourn (process);
1622 #endif
1623 
1624   for_each_thread (process->pid, [this] (thread_info *thread)
1625     {
1626       delete_lwp (get_thread_lwp (thread));
1627     });
1628 
1629   this->remove_linux_process (process);
1630 }
1631 
1632 void
1633 linux_process_target::join (int pid)
1634 {
1635   int status, ret;
1636 
1637   do {
1638     ret = my_waitpid (pid, &status, 0);
1639     if (WIFEXITED (status) || WIFSIGNALED (status))
1640       break;
1641   } while (ret != -1 || errno != ECHILD);
1642 }
1643 
1644 /* Return true if the given thread is still alive.  */
1645 
1646 bool
1647 linux_process_target::thread_alive (ptid_t ptid)
1648 {
1649   struct lwp_info *lwp = find_lwp_pid (ptid);
1650 
1651   /* We assume we always know if a thread exits.  If a whole process
1652      exited but we still haven't been able to report it to GDB, we'll
1653      hold on to the last lwp of the dead process.  */
1654   if (lwp != NULL)
1655     return !lwp_is_marked_dead (lwp);
1656   else
1657     return 0;
1658 }
1659 
/* Return true if THREAD's pending stop status is still worth
   reporting to GDB.  A pending SW/HW breakpoint stop is discarded
   (and 0 returned) if the thread's PC has changed since the stop was
   recorded.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      /* The breakpoint stop is stale if the PC moved since it was
	 recorded.  */
      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1699 
1700 /* Returns true if LWP is resumed from the client's perspective.  */
1701 
1702 static int
1703 lwp_resumed (struct lwp_info *lwp)
1704 {
1705   struct thread_info *thread = get_lwp_thread (lwp);
1706 
1707   if (thread->last_resume_kind != resume_stop)
1708     return 1;
1709 
1710   /* Did gdb send us a `vCont;t', but we haven't reported the
1711      corresponding stop to gdb yet?  If so, the thread is still
1712      resumed/running from gdb's perspective.  */
1713   if (thread->last_resume_kind == resume_stop
1714       && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1715     return 1;
1716 
1717   return 0;
1718 }
1719 
/* Predicate for find_thread et al.  Returns true if THREAD matches
   PTID and has an interesting stop status pending.  As a side effect,
   a pending status that turned out to be stale is discarded and the
   lwp re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status was discarded; let the lwp run again the
	 way it was previously resumed.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1743 
1744 struct lwp_info *
1745 find_lwp_pid (ptid_t ptid)
1746 {
1747   long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1748   thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1749     {
1750       return thr_arg->id.lwp () == lwp;
1751     });
1752 
1753   if (thread == NULL)
1754     return NULL;
1755 
1756   return get_thread_lwp (thread);
1757 }
1758 
1759 /* Return the number of known LWPs in the tgid given by PID.  */
1760 
1761 static int
1762 num_lwps (int pid)
1763 {
1764   int count = 0;
1765 
1766   for_each_thread (pid, [&] (thread_info *thread)
1767     {
1768       count++;
1769     });
1770 
1771   return count;
1772 }
1773 
1774 /* See nat/linux-nat.h.  */
1775 
1776 struct lwp_info *
1777 iterate_over_lwps (ptid_t filter,
1778 		   gdb::function_view<iterate_over_lwps_ftype> callback)
1779 {
1780   thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1781     {
1782       lwp_info *lwp = get_thread_lwp (thr_arg);
1783 
1784       return callback (lwp);
1785     });
1786 
1787   if (thread == NULL)
1788     return NULL;
1789 
1790   return get_thread_lwp (thread);
1791 }
1792 
/* Check all processes for a zombie thread-group leader that can no
   longer be debugged, and delete it (or mark it dead, if its exit
   should be reported to GDB).  Returns true if a new pending event
   (a dead-marked lwp) was created.  */

bool
linux_process_target::check_zombie_leaders ()
{
  bool new_pending_event = false;

  for_each_process ([&] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));

      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			    "num_lwps=%d, zombie=%d",
			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
			    linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  threads_debug_printf ("Thread group leader %d zombie "
				"(it exited, or another thread execd), "
				"deleting it.",
				leader_pid);

	  thread_info *leader_thread = get_lwp_thread (leader_lp);
	  if (report_exit_events_for (leader_thread))
	    {
	      mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
	      new_pending_event = true;
	    }
	  else
	    delete_lwp (leader_lp);
	}
    });

  return new_pending_event;
}
1881 
1882 /* Callback for `find_thread'.  Returns the first LWP that is not
1883    stopped.  */
1884 
1885 static bool
1886 not_stopped_callback (thread_info *thread, ptid_t filter)
1887 {
1888   if (!thread->id.matches (filter))
1889     return false;
1890 
1891   lwp_info *lwp = get_thread_lwp (thread);
1892 
1893   return !lwp->stopped;
1894 }
1895 
1896 /* Increment LWP's suspend count.  */
1897 
1898 static void
1899 lwp_suspended_inc (struct lwp_info *lwp)
1900 {
1901   lwp->suspended++;
1902 
1903   if (lwp->suspended > 4)
1904     threads_debug_printf
1905       ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1906        lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1907 }
1908 
1909 /* Decrement LWP's suspend count.  */
1910 
1911 static void
1912 lwp_suspended_decr (struct lwp_info *lwp)
1913 {
1914   lwp->suspended--;
1915 
1916   if (lwp->suspended < 0)
1917     {
1918       struct thread_info *thread = get_lwp_thread (lwp);
1919 
1920       internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1921 		      lwp->suspended);
1922     }
1923 }
1924 
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the lwp_suspended_inc above.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      threads_debug_printf ("got a tracepoint event");
      return 1;
    }

  return 0;
}
1973 
1974 fast_tpoint_collect_result
1975 linux_process_target::linux_fast_tracepoint_collecting
1976   (lwp_info *lwp, fast_tpoint_collect_status *status)
1977 {
1978   CORE_ADDR thread_area;
1979   struct thread_info *thread = get_lwp_thread (lwp);
1980 
1981   /* Get the thread area address.  This is used to recognize which
1982      thread is which when tracing with the in-process agent library.
1983      We don't read anything from the address, and treat it as opaque;
1984      it's the address itself that we assume is unique per-thread.  */
1985   if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1986     return fast_tpoint_collect_result::not_collecting;
1987 
1988   return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1989 }
1990 
/* Default implementation of the low target hook that fetches the
   thread area address for LWPID.  Returning -1 means the operation is
   unsupported; architectures that support fast tracepoints override
   this.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
1996 
/* Check whether LWP, stopped with status *WSTAT (or no status if
   WSTAT is NULL), is inside a fast tracepoint jump pad and needs to
   be moved out of it before the event can be reported.  Returns true
   if the LWP must keep running until it leaves the jump pad.  May
   momentarily stop all LWPs to cancel a pending exit-jump-pad
   breakpoint.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  /* Only relevant when fast tracepoints are in use, and only for
     stops that are not SIGTRAPs (SIGTRAP stops are handled by the
     tracepoint machinery itself).  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      threads_debug_printf
	("Checking whether LWP %ld needs to move out of the jump pad.",
	 lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      /* Synchronous fault signals (SIGILL/SIGFPE/SIGSEGV/SIGBUS) get
	 the special handling in the 'else' branch below; everything
	 else can simply be deferred until the LWP is out of the
	 pad.  */
      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      threads_debug_printf
		("Checking whether LWP %ld needs to move out of the jump pad..."
		 " it does", lwpid_of (current_thread));

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the PC to the tracepoint address, both in the
		 register cache and in our cached stop_pc.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  /* A previously planted exit-jump-pad breakpoint is now
	     stale; remove it while all LWPs are briefly stopped.  */
	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("Cancelling fast exit-jump-pad: removing bkpt."
		 "stopping all threads momentarily.");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  threads_debug_printf
    ("Checking whether LWP %ld needs to move out of the jump pad... no",
     lwpid_of (current_thread));

  return false;
}
2113 
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  The signal number is taken from *WSTAT; its
   siginfo is captured from the kernel via PTRACE_GETSIGINFO.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  threads_debug_printf ("Deferring signal %d for LWP %ld.",
			WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	threads_debug_printf ("   Already queued %d", sig.signal);

      threads_debug_printf ("   (no more currently queued signals)");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      threads_debug_printf
		("Not requeuing already queued non-RT signal %d for LWP %ld",
		 sig.signal, lwpid_of (thread));
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  /* Snapshot the kernel's siginfo for this stop so it can be restored
     by dequeue_one_deferred_signal when the signal is finally
     delivered.  NOTE(review): the ptrace return value is deliberately
     ignored here; on failure the queued siginfo presumably stays
     zeroed from the emplace above — confirm.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}
2155 
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Returns 1 and stores a stop status in *WSTAT
   if a deferred signal was dequeued (its siginfo is re-installed into
   the kernel via PTRACE_SETSIGINFO); returns 0 if the queue is
   empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      /* Only restore siginfo that was actually captured (si_signo is
	 zero when no siginfo was recorded for this entry).  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
			    WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    threads_debug_printf ("   Still queued %d", sig.signal);

	  threads_debug_printf ("   (no more queued signals)");
	}

      return 1;
    }

  return 0;
}
2191 
2192 bool
2193 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2194 {
2195   scoped_restore_current_thread restore_thread;
2196   switch_to_thread (get_lwp_thread (child));
2197 
2198   if (low_stopped_by_watchpoint ())
2199     {
2200       child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2201       child->stopped_data_address = low_stopped_data_address ();
2202     }
2203 
2204   return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2205 }
2206 
/* Default implementation of the low target hook asking whether the
   current thread stopped because of a watchpoint.  Targets with
   hardware watchpoint support override this.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
2212 
/* Default implementation of the low target hook returning the data
   address that triggered a watchpoint stop.  Only meaningful when
   low_stopped_by_watchpoint returns true; targets with watchpoint
   support override this.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2218 
2219 /* Return the ptrace options that we want to try to enable.  */
2220 
2221 static int
2222 linux_low_ptrace_options (int attached)
2223 {
2224   client_state &cs = get_client_state ();
2225   int options = 0;
2226 
2227   if (!attached)
2228     options |= PTRACE_O_EXITKILL;
2229 
2230   if (cs.report_fork_events)
2231     options |= PTRACE_O_TRACEFORK;
2232 
2233   if (cs.report_vfork_events)
2234     options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2235 
2236   if (cs.report_exec_events)
2237     options |= PTRACE_O_TRACEEXEC;
2238 
2239   options |= PTRACE_O_TRACESYSGOOD;
2240 
2241   return options;
2242 }
2243 
/* Process the waitstatus WSTAT reported by the kernel for LWPID.
   Interesting events are left pending on the LWP (status_pending_p /
   status_pending) for a later reporting pass; events that should not
   reach the core (delayed SIGSTOPs, handled extended events, exits of
   non-leader threads GDB doesn't care about, ...) are filtered out
   here.  */

void
linux_process_target::filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  /* Set when save_stop_reason already recorded child->stop_pc.  */
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (child == nullptr)
    {
      if (WIFSTOPPED (wstat))
	{
	  if (WSTOPSIG (wstat) == SIGTRAP
	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      threads_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      child = add_lwp (ptid_t (lwpid, lwpid));
	      child->stopped = 1;
	      switch_to_thread (child->thread);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      threads_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (wstat).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, wstat);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  find_process ([&] (process_info *proc)
	    {
	      if (proc->pid == lwpid)
		{
		  threads_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  child = add_lwp (ptid_t (lwpid, lwpid));
		  return true;
		}
	      return false;
	    });
	}

      /* If the event was for an LWP we neither knew about nor
	 re-added above, there is nothing further to do.  */
      if (child == nullptr)
	return;
    }

  thread = get_lwp_thread (child);

  /* Whatever the event is, the LWP is known stopped from here on.  */
  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      threads_debug_printf ("%d exited", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If this is not the leader LWP, then the exit signal was not
	 the end of the debugged application and should be ignored,
	 unless GDB wants to hear about thread exits.  */
      if (report_exit_events_for (thread) || is_leader (thread))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat, false);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  /* Past this point the LWP must have stopped rather than exited;
     the WIFSTOPPED check just below is therefore always true, and is
     kept only for symmetry with the other guarded sections.  */
  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  /* Enable the requested ptrace event flags the first time we see
     this LWP stop, if not done already.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops come in entry/return pairs; toggle between the
	 two states.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  /* Handle SIGSTOPs we requested ourselves (e.g. for stopping all
     threads): they are not interesting to the core unless the core
     explicitly asked for the stop.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      threads_debug_printf ("Expected stop.");

      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
				target_pid_to_str (ptid_of (thread)).c_str ());
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)).c_str ());

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  /* The event is interesting; leave it pending for the reporting
     pass.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}
2462 
2463 bool
2464 linux_process_target::maybe_hw_step (thread_info *thread)
2465 {
2466   if (supports_hardware_single_step ())
2467     return true;
2468   else
2469     {
2470       /* GDBserver must insert single-step breakpoint for software
2471 	 single step.  */
2472       gdb_assert (has_single_step_breakpoints (thread));
2473       return false;
2474     }
2475 }
2476 
2477 void
2478 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2479 {
2480   struct lwp_info *lp = get_thread_lwp (thread);
2481 
2482   if (lp->stopped
2483       && !lp->suspended
2484       && !lp->status_pending_p
2485       && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2486     {
2487       int step = 0;
2488 
2489       if (thread->last_resume_kind == resume_step)
2490 	{
2491 	  if (supports_software_single_step ())
2492 	    install_software_single_step_breakpoints (lp);
2493 
2494 	  step = maybe_hw_step (thread);
2495 	}
2496 
2497       threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2498 			    target_pid_to_str (ptid_of (thread)).c_str (),
2499 			    paddress (lp->stop_pc), step);
2500 
2501       resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2502     }
2503 }
2504 
/* Wait for an event from any LWP matching WAIT_PTID, but only
   consume/report statuses of LWPs matching FILTER_PTID.  The raw
   waitstatus is stored in *WSTATP; OPTIONS are extra waitpid flags
   (e.g. WNOHANG).  Returns the LWP id of the event thread (which is
   made current), 0 if WNOHANG was passed and no event is available,
   or -1 if there are no unwaited-for children left.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard or whole-process wait: pick among matching threads
	 with a pending status at random, to prevent starvation.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
	}
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting for one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);
      gdb_assert (requested_child != nullptr);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* The LWP is midway through a fast tracepoint collect;
	     defer the pending signal and let it finish the collect
	     first.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error ("requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      /* A pending status was found without touching the kernel;
	 consume and report it.  */
      threads_debug_printf ("Got an event from pending child %ld (%04x)",
			    lwpid_of (event_thread),
			    event_child->status_pending);

      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      switch_to_thread (event_thread);
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  threads_debug_printf ("waitpid %ld received %s",
				(long) ret, status_to_str (*wstatp).c_str ());

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      if (check_zombie_leaders ())
	goto retry;

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  threads_debug_printf ("exit (no unwaited-for LWP)");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  threads_debug_printf ("WNOHANG set, no event found");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      threads_debug_printf ("sigsuspend'ing");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before returning the event.  */
  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  switch_to_thread (event_thread);

  return lwpid_of (event_thread);
}
2703 
/* Wait for an event from any LWP matching PTID, consuming statuses
   from those same LWPs.  Thin wrapper around wait_for_event_filtered
   with identical wait and filter ptids; see that function for the
   return value convention.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2709 
2710 /* Select one LWP out of those that have events pending.  */
2711 
2712 static void
2713 select_event_lwp (struct lwp_info **orig_lp)
2714 {
2715   struct thread_info *event_thread = NULL;
2716 
2717   /* In all-stop, give preference to the LWP that is being
2718      single-stepped.  There will be at most one, and it's the LWP that
2719      the core is most interested in.  If we didn't do this, then we'd
2720      have to handle pending step SIGTRAPs somehow in case the core
2721      later continues the previously-stepped thread, otherwise we'd
2722      report the pending SIGTRAP, and the core, not having stepped the
2723      thread, wouldn't understand what the trap was for, and therefore
2724      would report it to the user as a random signal.  */
2725   if (!non_stop)
2726     {
2727       event_thread = find_thread ([] (thread_info *thread)
2728 	{
2729 	  lwp_info *lp = get_thread_lwp (thread);
2730 
2731 	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2732 		  && thread->last_resume_kind == resume_step
2733 		  && lp->status_pending_p);
2734 	});
2735 
2736       if (event_thread != NULL)
2737 	threads_debug_printf
2738 	  ("Select single-step %s",
2739 	   target_pid_to_str (ptid_of (event_thread)).c_str ());
2740     }
2741   if (event_thread == NULL)
2742     {
2743       /* No single-stepping LWP.  Select one at random, out of those
2744 	 which have had events.  */
2745 
2746       event_thread = find_thread_in_random ([&] (thread_info *thread)
2747 	{
2748 	  lwp_info *lp = get_thread_lwp (thread);
2749 
2750 	  /* Only resumed LWPs that have an event pending. */
2751 	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2752 		  && lp->status_pending_p);
2753 	});
2754     }
2755 
2756   if (event_thread != NULL)
2757     {
2758       struct lwp_info *event_lp = get_thread_lwp (event_thread);
2759 
2760       /* Switch the event LWP.  */
2761       *orig_lp = event_lp;
2762     }
2763 }
2764 
2765 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2766    NULL.  */
2767 
2768 static void
2769 unsuspend_all_lwps (struct lwp_info *except)
2770 {
2771   for_each_thread ([&] (thread_info *thread)
2772     {
2773       lwp_info *lwp = get_thread_lwp (thread);
2774 
2775       if (lwp != except)
2776 	lwp_suspended_decr (lwp);
2777     });
2778 }
2779 
2780 static bool lwp_running (thread_info *thread);
2781 
2782 /* Stabilize threads (move out of jump pads).
2783 
2784    If a thread is midway collecting a fast tracepoint, we need to
2785    finish the collection and move it out of the jump pad before
2786    reporting the signal.
2787 
2788    This avoids recursion while collecting (when a signal arrives
2789    midway, and the signal handler itself collects), which would trash
2790    the trace buffer.  In case the user set a breakpoint in a signal
2791    handler, this avoids the backtrace showing the jump pad, etc..
2792    Most importantly, there are certain things we can't do safely if
2793    threads are stopped in a jump pad (or in its callee's).  For
2794    example:
2795 
2796      - starting a new trace run.  A thread still collecting the
2797    previous run, could trash the trace buffer when resumed.  The trace
2798    buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through
   memcpy'ing to the buffer, which would mean that when resumed, it
   would clobber the trace buffer that had been set for a new run.
2802 
2803      - we can't rewrite/reuse the jump pads for new tracepoints
2804    safely.  Say you do tstart while a thread is stopped midway while
2805    collecting.  When the thread is later resumed, it finishes the
2806    collection, and returns to the jump pad, to execute the original
2807    instruction that was under the tracepoint jump at the time the
2808    older run had been started.  If the jump pad had been rewritten
2809    since for something else in the new run, the thread would now
2810    execute the wrong / random instructions.  */
2811 
/* Move every thread out of the fast tracepoint jump pads (see the
   long comment above for why), then leave them all suspended.  Bails
   out early if some thread is already stuck in a jump pad and cannot
   be moved.  */

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
				{
				  return stuck_in_jump_pad (thread);
				});

  if (thread_stuck != NULL)
    {
      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
			    lwpid_of (thread_stuck));
      return;
    }

  scoped_restore_current_thread restore_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Defer any signal the thread stopped with (or an explicit
	     stop request) so it gets reported once the thread is out
	     of the jump pad.  */
	  if (ourstatus.sig () != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the per-thread locks taken in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  /* Sanity check: no thread should still be in a jump pad now.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
		       {
			 return stuck_in_jump_pad (thread);
		       });

      if (thread_stuck != NULL)
	threads_debug_printf
	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
	   lwpid_of (thread_stuck));
    }
}
2882 
2883 /* Convenience function that is called when the kernel reports an
2884    event that is not passed out to GDB.  */
2885 
2886 static ptid_t
2887 ignore_event (struct target_waitstatus *ourstatus)
2888 {
2889   /* If we got an event, there may still be others, as a single
2890      SIGCHLD can indicate more than one child stopped.  This forces
2891      another target_wait call.  */
2892   async_file_mark ();
2893 
2894   ourstatus->set_ignore ();
2895   return null_ptid;
2896 }
2897 
2898 ptid_t
2899 linux_process_target::filter_exit_event (lwp_info *event_child,
2900 					 target_waitstatus *ourstatus)
2901 {
2902   struct thread_info *thread = get_lwp_thread (event_child);
2903   ptid_t ptid = ptid_of (thread);
2904 
2905   if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2906     {
2907       /* We're reporting a thread exit for the leader.  The exit was
2908 	 detected by check_zombie_leaders.  */
2909       gdb_assert (is_leader (thread));
2910       gdb_assert (report_exit_events_for (thread));
2911 
2912       delete_lwp (event_child);
2913       return ptid;
2914     }
2915 
2916   /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2917      if a non-leader thread exits with a signal, we'd report it to the
2918      core which would interpret it as the whole-process exiting.
2919      There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
2920   if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2921       && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2922     return ptid;
2923 
2924   if (!is_leader (thread))
2925     {
2926       if (report_exit_events_for (thread))
2927 	ourstatus->set_thread_exited (0);
2928       else
2929 	ourstatus->set_ignore ();
2930 
2931       delete_lwp (event_child);
2932     }
2933   return ptid;
2934 }
2935 
2936 /* Returns 1 if GDB is interested in any event_child syscalls.  */
2937 
2938 static int
2939 gdb_catching_syscalls_p (struct lwp_info *event_child)
2940 {
2941   struct thread_info *thread = get_lwp_thread (event_child);
2942   struct process_info *proc = get_thread_process (thread);
2943 
2944   return !proc->syscalls_to_catch.empty ();
2945 }
2946 
2947 bool
2948 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2949 {
2950   int sysno;
2951   struct thread_info *thread = get_lwp_thread (event_child);
2952   struct process_info *proc = get_thread_process (thread);
2953 
2954   if (proc->syscalls_to_catch.empty ())
2955     return false;
2956 
2957   if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2958     return true;
2959 
2960   get_syscall_trapinfo (event_child, &sysno);
2961 
2962   for (int iter : proc->syscalls_to_catch)
2963     if (iter == sysno)
2964       return true;
2965 
2966   return false;
2967 }
2968 
/* Do the actual work of the wait target method: wait for (or, with
   TARGET_WNOHANG, poll for) an event out of the inferior; handle it
   internally where possible (internal breakpoints, step-over
   bookkeeping, tracepoint collection, deferred/ignored signals), and
   otherwise fill in OURSTATUS and return the ptid of the event
   thread.  Returns null_ptid with OURSTATUS set to "ignore" or
   "no-resumed" when there is nothing to report.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  THREADS_SCOPED_DEBUG_ENTER_EXIT;

  client_state &cs = get_client_state ();
  int w;			/* waitpid status of the event thread.  */
  struct lwp_info *event_child;	/* LWP the event was reported for.  */
  int options;			/* Native options passed to waitpid.  */
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;

  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->set_ignore ();

  bool was_any_resumed = any_resumed ();

  /* While a step-over is in progress, only events from the stepping
     thread are of interest, and we must wait for them to arrive.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
			    target_pid_to_str (step_over_bkpt).c_str ());
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !was_any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");

      ourstatus->set_ignore ();
      return null_ptid;
    }
  else if (pid == -1)
    {
      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");

      ourstatus->set_no_resumed ();
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  /* If we already have the exit recorded in waitstatus, use
	     it.  This will happen when we detect a zombie leader,
	     when we had GDB_THREAD_OPTION_EXIT enabled for it.  We
	     want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
	     as the whole process hasn't exited yet.  */
	  const target_waitstatus &ws = event_child->waitstatus;
	  if (ws.kind () != TARGET_WAITKIND_IGNORE)
	    {
	      gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
			  || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
	      *ourstatus = ws;
	    }
	  else
	    ourstatus->set_exited (WEXITSTATUS (w));

	  threads_debug_printf
	    ("ret = %s, exited with retcode %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WEXITSTATUS (w));
	}
      else
	{
	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));

	  threads_debug_printf
	    ("ret = %s, terminated with signal %d",
	     target_pid_to_str (ptid_of (current_thread)).c_str (),
	     WTERMSIG (w));
	}

      return filter_exit_event (event_child, ourstatus);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      threads_debug_printf
	("step-over for %s executed software breakpoint",
	 target_pid_to_str (ptid_of (current_thread)).c_str ());

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
			    "to defer or adjust it.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
				WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      threads_debug_printf
	("LWP %ld was trying to move out of the jump pad (%d). "
	 "Check if we're already there.",
	 lwpid_of (current_thread),
	 (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      threads_debug_printf
		("No longer need exit-jump-pad bkpt; removing it."
		 "stopping all threads momentarily.");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  threads_debug_printf
	    ("fast tracepoint finished collecting successfully.");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    threads_debug_printf ("dequeued one signal.");
	  else
	    {
	      threads_debug_printf ("no deferred signals.");

	      if (stabilizing_threads)
		{
		  ourstatus->set_stopped (GDB_SIGNAL_0);

		  threads_debug_printf
		    ("ret = %s, stopped while stabilizing threads",
		     target_pid_to_str (ptid_of (current_thread)).c_str ());

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      threads_debug_printf ("Ignored syscall for LWP %ld.",
			    lwpid_of (current_thread));

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler. Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      threads_debug_printf ("Ignored signal %d for LWP %ld.",
			    WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'll be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (bp_explains_trap)
	threads_debug_printf ("Hit a gdbserver breakpoint.");

      if (step_over_finished)
	threads_debug_printf ("Step-over finished.");

      if (trace_event)
	threads_debug_printf ("Tracepoint event.");

      if (lwp_in_step_range (event_child))
	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
			      paddress (event_child->stop_pc),
			      paddress (event_child->step_range_start),
			      paddress (event_child->step_range_end));

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      threads_debug_printf ("proceeding all threads.");

      proceed_all_lwps ();

      return ignore_event (ourstatus);
    }

    if (debug_threads)
      {
	if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
	  threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
				lwpid_of (get_lwp_thread (event_child)),
				event_child->waitstatus.to_string ().c_str ());

	if (current_thread->last_resume_kind == resume_step)
	  {
	    if (event_child->step_range_start == event_child->step_range_end)
	      threads_debug_printf
		("GDB wanted to single-step, reporting event.");
	    else if (!lwp_in_step_range (event_child))
	      threads_debug_printf ("Out of step range, reporting event.");
	  }

	if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	  threads_debug_printf ("Stopped by watchpoint.");
	else if (gdb_breakpoint_here (event_child->stop_pc))
	  threads_debug_printf ("Stopped by GDB breakpoint.");
      }

    threads_debug_printf ("Hit a non-gdbserver trap event.");

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Decide whether to remove the single-step breakpoints.  If we
	 do remove them, stop all lwps, so that other threads won't
	 hit a breakpoint left in stale memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the stale
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  switch_to_thread (get_lwp_thread (event_child));

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  /* At this point, we haven't set OURSTATUS.  This is where we do it.  */
  gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);

  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork, clone or exec,
	 let GDB know.  */

      /* Break the unreported fork/vfork/clone relationship chain.  */
      if (is_new_child_status (event_child->waitstatus.kind ()))
	{
	  event_child->relative->relative = NULL;
	  event_child->relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.set_ignore ();
    }
  else
    {
      /* The LWP stopped due to a plain signal or a syscall signal.  Either way,
	 event_child->waitstatus wasn't filled in with the details, so look at
	 the wait status W.  */
      if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
	{
	  int syscall_number;

	  get_syscall_trapinfo (event_child, &syscall_number);
	  if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");
	}
      else if (current_thread->last_resume_kind == resume_stop
	       && WSTOPSIG (w) == SIGSTOP)
	{
	  /* A thread that has been requested to stop by GDB with vCont;t,
	     and it stopped cleanly, so report as SIG0.  The use of
	     SIGSTOP is an implementation detail.  */
	  ourstatus->set_stopped (GDB_SIGNAL_0);
	}
      else
	ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
    }

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  gdb_assert (step_over_bkpt == null_ptid);

  threads_debug_printf ("ret = %s, %s",
			target_pid_to_str (ptid_of (current_thread)).c_str (),
			ourstatus->to_string ().c_str ());

  return filter_exit_event (event_child, ourstatus);
}
3634 
/* Get rid of any pending event in the pipe.  Called before waiting,
   so that stale wakeups don't trigger spurious target_wait calls.  */
static void
async_file_flush (void)
{
  linux_event_pipe.flush ();
}
3641 
/* Put something in the pipe, so the event loop wakes up.  Used to
   force another target_wait call when more events may be pending.  */
static void
async_file_mark (void)
{
  linux_event_pipe.mark ();
}
3648 
3649 ptid_t
3650 linux_process_target::wait (ptid_t ptid,
3651 			    target_waitstatus *ourstatus,
3652 			    target_wait_flags target_options)
3653 {
3654   ptid_t event_ptid;
3655 
3656   /* Flush the async file first.  */
3657   if (target_is_async_p ())
3658     async_file_flush ();
3659 
3660   do
3661     {
3662       event_ptid = wait_1 (ptid, ourstatus, target_options);
3663     }
3664   while ((target_options & TARGET_WNOHANG) == 0
3665 	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3666 
3667   /* If at least one stop was reported, there may be more.  A single
3668      SIGCHLD can signal more than one child stop.  */
3669   if (target_is_async_p ()
3670       && (target_options & TARGET_WNOHANG) != 0
3671       && event_ptid != null_ptid)
3672     async_file_mark ();
3673 
3674   return event_ptid;
3675 }
3676 
3677 /* Send a signal to an LWP.  */
3678 
3679 static int
3680 kill_lwp (unsigned long lwpid, int signo)
3681 {
3682   int ret;
3683 
3684   errno = 0;
3685   ret = syscall (__NR_tkill, lwpid, signo);
3686   if (errno == ENOSYS)
3687     {
3688       /* If tkill fails, then we are not using nptl threads, a
3689 	 configuration we no longer support.  */
3690       perror_with_name (("tkill"));
3691     }
3692   return ret;
3693 }
3694 
/* Request that LWP stop, by sending it a SIGSTOP (unless one is
   already pending for it -- see send_sigstop).  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3700 
3701 static void
3702 send_sigstop (struct lwp_info *lwp)
3703 {
3704   int pid;
3705 
3706   pid = lwpid_of (get_lwp_thread (lwp));
3707 
3708   /* If we already have a pending stop signal for this process, don't
3709      send another.  */
3710   if (lwp->stop_expected)
3711     {
3712       threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3713 
3714       return;
3715     }
3716 
3717   threads_debug_printf ("Sending sigstop to lwp %d", pid);
3718 
3719   lwp->stop_expected = 1;
3720   kill_lwp (pid, SIGSTOP);
3721 }
3722 
3723 static void
3724 send_sigstop (thread_info *thread, lwp_info *except)
3725 {
3726   struct lwp_info *lwp = get_thread_lwp (thread);
3727 
3728   /* Ignore EXCEPT.  */
3729   if (lwp == except)
3730     return;
3731 
3732   if (lwp->stopped)
3733     return;
3734 
3735   send_sigstop (lwp);
3736 }
3737 
3738 /* Increment the suspend count of an LWP, and stop it, if not stopped
3739    yet.  */
3740 static void
3741 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3742 {
3743   struct lwp_info *lwp = get_thread_lwp (thread);
3744 
3745   /* Ignore EXCEPT.  */
3746   if (lwp == except)
3747     return;
3748 
3749   lwp_suspended_inc (lwp);
3750 
3751   send_sigstop (thread, except);
3752 }
3753 
3754 /* Mark LWP dead, with WSTAT as exit status pending to report later.
3755    If THREAD_EVENT is true, interpret WSTAT as a thread exit event
3756    instead of a process exit event.  This is meaningful for the leader
3757    thread, as we normally report a process-wide exit event when we see
3758    the leader exit, and a thread exit event when we see any other
3759    thread exit.  */
3760 
3761 static void
3762 mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
3763 {
3764   /* Store the exit status for later.  */
3765   lwp->status_pending_p = 1;
3766   lwp->status_pending = wstat;
3767 
3768   /* Store in waitstatus as well, as there's nothing else to process
3769      for this event.  */
3770   if (WIFEXITED (wstat))
3771     {
3772       if (thread_event)
3773 	lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
3774       else
3775 	lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3776     }
3777   else if (WIFSIGNALED (wstat))
3778     {
3779       gdb_assert (!thread_event);
3780       lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3781     }
3782   else
3783     gdb_assert_not_reached ("unknown status kind");
3784 
3785   /* Prevent trying to stop it.  */
3786   lwp->stopped = 1;
3787 
3788   /* No further stops are expected from a dead lwp.  */
3789   lwp->stop_expected = 0;
3790 }
3791 
3792 /* Return true if LWP has exited already, and has a pending exit event
3793    to report to GDB.  */
3794 
3795 static int
3796 lwp_is_marked_dead (struct lwp_info *lwp)
3797 {
3798   return (lwp->status_pending_p
3799 	  && (WIFEXITED (lwp->status_pending)
3800 	      || WIFSIGNALED (lwp->status_pending)));
3801 }
3802 
/* Wait until every LWP we asked to stop has reported a stop (or
   exited).  All events are left pending on the LWPs rather than being
   reported to GDB.  If the previously-selected thread died in the
   process, the current thread is left unselected, so a stale thread
   is not handed back to GDB.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember the selected thread's id before pulling events, so we
     can tell below whether it survived.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  scoped_restore_current_thread restore_thread;

  threads_debug_printf ("pulling events");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    return;
  else
    {
      threads_debug_printf ("Previously current thread died.");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      restore_thread.dont_restore ();
      switch_to_thread (nullptr);
    }
}
3840 
3841 bool
3842 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3843 {
3844   struct lwp_info *lwp = get_thread_lwp (thread);
3845 
3846   if (lwp->suspended != 0)
3847     {
3848       internal_error ("LWP %ld is suspended, suspended=%d\n",
3849 		      lwpid_of (thread), lwp->suspended);
3850     }
3851   gdb_assert (lwp->stopped);
3852 
3853   /* Allow debugging the jump pad, gdb_collect, etc..  */
3854   return (supports_fast_tracepoints ()
3855 	  && agent_loaded_p ()
3856 	  && (gdb_breakpoint_here (lwp->stop_pc)
3857 	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3858 	      || thread->last_resume_kind == resume_step)
3859 	  && (linux_fast_tracepoint_collecting (lwp, NULL)
3860 	      != fast_tpoint_collect_result::not_collecting));
3861 }
3862 
3863 void
3864 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3865 {
3866   struct lwp_info *lwp = get_thread_lwp (thread);
3867   int *wstat;
3868 
3869   if (lwp->suspended != 0)
3870     {
3871       internal_error ("LWP %ld is suspended, suspended=%d\n",
3872 		      lwpid_of (thread), lwp->suspended);
3873     }
3874   gdb_assert (lwp->stopped);
3875 
3876   /* For gdb_breakpoint_here.  */
3877   scoped_restore_current_thread restore_thread;
3878   switch_to_thread (thread);
3879 
3880   wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3881 
3882   /* Allow debugging the jump pad, gdb_collect, etc.  */
3883   if (!gdb_breakpoint_here (lwp->stop_pc)
3884       && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3885       && thread->last_resume_kind != resume_step
3886       && maybe_move_out_of_jump_pad (lwp, wstat))
3887     {
3888       threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3889 			    lwpid_of (thread));
3890 
3891       if (wstat)
3892 	{
3893 	  lwp->status_pending_p = 0;
3894 	  enqueue_one_deferred_signal (lwp, wstat);
3895 
3896 	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3897 				WSTOPSIG (*wstat), lwpid_of (thread));
3898 	}
3899 
3900       resume_one_lwp (lwp, 0, 0, NULL);
3901     }
3902   else
3903     lwp_suspended_inc (lwp);
3904 }
3905 
3906 static bool
3907 lwp_running (thread_info *thread)
3908 {
3909   struct lwp_info *lwp = get_thread_lwp (thread);
3910 
3911   if (lwp_is_marked_dead (lwp))
3912     return false;
3913 
3914   return !lwp->stopped;
3915 }
3916 
3917 void
3918 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3919 {
3920   /* Should not be called recursively.  */
3921   gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3922 
3923   THREADS_SCOPED_DEBUG_ENTER_EXIT;
3924 
3925   threads_debug_printf
3926     ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3927      (except != NULL
3928       ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3929       : "none"));
3930 
3931   stopping_threads = (suspend
3932 		      ? STOPPING_AND_SUSPENDING_THREADS
3933 		      : STOPPING_THREADS);
3934 
3935   if (suspend)
3936     for_each_thread ([&] (thread_info *thread)
3937       {
3938 	suspend_and_send_sigstop (thread, except);
3939       });
3940   else
3941     for_each_thread ([&] (thread_info *thread)
3942       {
3943 	 send_sigstop (thread, except);
3944       });
3945 
3946   wait_for_sigstop ();
3947   stopping_threads = NOT_STOPPING_THREADS;
3948 
3949   threads_debug_printf ("setting stopping_threads back to !stopping");
3950 }
3951 
3952 /* Enqueue one signal in the chain of signals which need to be
3953    delivered to this process on next resume.  */
3954 
3955 static void
3956 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3957 {
3958   lwp->pending_signals.emplace_back (signal);
3959   if (info == nullptr)
3960     memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3961   else
3962     lwp->pending_signals.back ().info = *info;
3963 }
3964 
3965 void
3966 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3967 {
3968   struct thread_info *thread = get_lwp_thread (lwp);
3969   struct regcache *regcache = get_thread_regcache (thread, 1);
3970 
3971   scoped_restore_current_thread restore_thread;
3972 
3973   switch_to_thread (thread);
3974   std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3975 
3976   for (CORE_ADDR pc : next_pcs)
3977     set_single_step_breakpoint (pc, current_ptid);
3978 }
3979 
3980 int
3981 linux_process_target::single_step (lwp_info* lwp)
3982 {
3983   int step = 0;
3984 
3985   if (supports_hardware_single_step ())
3986     {
3987       step = 1;
3988     }
3989   else if (supports_software_single_step ())
3990     {
3991       install_software_single_step_breakpoints (lwp);
3992       step = 0;
3993     }
3994   else
3995     threads_debug_printf ("stepping is not implemented on this target");
3996 
3997   return step;
3998 }
3999 
4000 /* The signal can be delivered to the inferior if we are not trying to
4001    finish a fast tracepoint collect.  Since signal can be delivered in
4002    the step-over, the program may go to signal handler and trap again
4003    after return from the signal handler.  We can live with the spurious
4004    double traps.  */
4005 
4006 static int
4007 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4008 {
4009   return (lwp->collecting_fast_tracepoint
4010 	  == fast_tpoint_collect_result::not_collecting);
4011 }
4012 
/* Resume LWP, single-stepping it if STEP is nonzero, and delivering
   SIGNAL (with optional siginfo INFO) if nonzero.  Signals that
   cannot be delivered right now are enqueued instead.  Throws on
   ptrace failure; see resume_one_lwp for the wrapper that handles the
   LWP-disappeared case.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Already running; nothing to do.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  /* While stabilizing threads, the only LWPs resumed are those still
     inside a fast tracepoint collect.  */
  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status takes precedence over resuming; leave the LWP
     stopped so the status can be reported.  */
  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Not resuming lwp %ld (%s, stop %s); has pending status",
	 lwpid_of (thread), step ? "step" : "continue",
	 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      threads_debug_printf ("  pending reinsert at 0x%s",
			    paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
				 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    threads_debug_printf
      ("lwp %ld wants to get out of fast tracepoint jump pad "
       "(exit-jump-pad-bkpt)", lwpid_of (thread));

  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      threads_debug_printf
	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
	 lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error ("moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      threads_debug_printf
	("lwp %ld has a while-stepping action -> forcing step.",
	 lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, for later breakpoint
     adjustment; requires a register-capable target description.  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
			    (long) lwp->stop_pc);
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
			lwpid_of (thread), step ? "step" : "continue", signal,
			lwp->stop_expected ? "expected" : "not expected");

  /* Give the low target a chance to flush per-thread state (e.g.,
     debug registers) before the LWP runs again.  */
  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  if (errno)
    {
      int saved_errno = errno;

      threads_debug_printf ("ptrace errno = %d (%s)",
			    saved_errno, strerror (saved_errno));

      errno = saved_errno;
      perror_with_name ("resuming thread");
    }

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4208 
/* Default implementation of the low_prepare_to_resume hook: do
   nothing.  Low targets with per-thread state to flush before an LWP
   runs again presumably override this -- see the call site in
   resume_one_lwp_throw.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4214 
4215 /* Called when we try to resume a stopped LWP and that errors out.  If
4216    the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4217    or about to become), discard the error, clear any pending status
4218    the LWP may have, and return true (we'll collect the exit status
4219    soon enough).  Otherwise, return false.  */
4220 
4221 static int
4222 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4223 {
4224   struct thread_info *thread = get_lwp_thread (lp);
4225 
4226   /* If we get an error after resuming the LWP successfully, we'd
4227      confuse !T state for the LWP being gone.  */
4228   gdb_assert (lp->stopped);
4229 
4230   /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4231      because even if ptrace failed with ESRCH, the tracee may be "not
4232      yet fully dead", but already refusing ptrace requests.  In that
4233      case the tracee has 'R (Running)' state for a little bit
4234      (observed in Linux 3.18).  See also the note on ESRCH in the
4235      ptrace(2) man page.  Instead, check whether the LWP has any state
4236      other than ptrace-stopped.  */
4237 
4238   /* Don't assume anything if /proc/PID/status can't be read.  */
4239   if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4240     {
4241       lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4242       lp->status_pending_p = 0;
4243       return 1;
4244     }
4245   return 0;
4246 }
4247 
4248 void
4249 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4250 				      siginfo_t *info)
4251 {
4252   try
4253     {
4254       resume_one_lwp_throw (lwp, step, signal, info);
4255     }
4256   catch (const gdb_exception_error &ex)
4257     {
4258       if (check_ptrace_stopped_lwp_gone (lwp))
4259 	{
4260 	  /* This could because we tried to resume an LWP after its leader
4261 	     exited.  Mark it as resumed, so we can collect an exit event
4262 	     from it.  */
4263 	  lwp->stopped = 0;
4264 	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4265 	}
4266       else
4267 	throw;
4268     }
4269 }
4270 
4271 /* This function is called once per thread via for_each_thread.
4272    We look up which resume request applies to THREAD and mark it with a
4273    pointer to the appropriate resume request.
4274 
4275    This algorithm is O(threads * resume elements), but resume elements
4276    is small (and will remain small at least until GDB supports thread
4277    suspension).  */
4278 
4279 static void
4280 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4281 {
4282   struct lwp_info *lwp = get_thread_lwp (thread);
4283 
4284   for (int ndx = 0; ndx < n; ndx++)
4285     {
4286       ptid_t ptid = resume[ndx].thread;
4287       if (ptid == minus_one_ptid
4288 	  || ptid == thread->id
4289 	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4290 	     of PID'.  */
4291 	  || (ptid.pid () == pid_of (thread)
4292 	      && (ptid.is_pid ()
4293 		  || ptid.lwp () == -1)))
4294 	{
4295 	  if (resume[ndx].kind == resume_stop
4296 	      && thread->last_resume_kind == resume_stop)
4297 	    {
4298 	      threads_debug_printf
4299 		("already %s LWP %ld at GDB's request",
4300 		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4301 		  ? "stopped" : "stopping"),
4302 		  lwpid_of (thread));
4303 
4304 	      continue;
4305 	    }
4306 
4307 	  /* Ignore (wildcard) resume requests for already-resumed
4308 	     threads.  */
4309 	  if (resume[ndx].kind != resume_stop
4310 	      && thread->last_resume_kind != resume_stop)
4311 	    {
4312 	      threads_debug_printf
4313 		("already %s LWP %ld at GDB's request",
4314 		 (thread->last_resume_kind == resume_step
4315 		  ? "stepping" : "continuing"),
4316 		 lwpid_of (thread));
4317 	      continue;
4318 	    }
4319 
4320 	  /* Don't let wildcard resumes resume fork/vfork/clone
4321 	     children that GDB does not yet know are new children.  */
4322 	  if (lwp->relative != NULL)
4323 	    {
4324 	      struct lwp_info *rel = lwp->relative;
4325 
4326 	      if (rel->status_pending_p
4327 		  && is_new_child_status (rel->waitstatus.kind ()))
4328 		{
4329 		  threads_debug_printf
4330 		    ("not resuming LWP %ld: has queued stop reply",
4331 		     lwpid_of (thread));
4332 		  continue;
4333 		}
4334 	    }
4335 
4336 	  /* If the thread has a pending event that has already been
4337 	     reported to GDBserver core, but GDB has not pulled the
4338 	     event out of the vStopped queue yet, likewise, ignore the
4339 	     (wildcard) resume request.  */
4340 	  if (in_queued_stop_replies (thread->id))
4341 	    {
4342 	      threads_debug_printf
4343 		("not resuming LWP %ld: has queued stop reply",
4344 		 lwpid_of (thread));
4345 	      continue;
4346 	    }
4347 
4348 	  lwp->resume = &resume[ndx];
4349 	  thread->last_resume_kind = lwp->resume->kind;
4350 
4351 	  lwp->step_range_start = lwp->resume->step_range_start;
4352 	  lwp->step_range_end = lwp->resume->step_range_end;
4353 
4354 	  /* If we had a deferred signal to report, dequeue one now.
4355 	     This can happen if LWP gets more than one signal while
4356 	     trying to get out of a jump pad.  */
4357 	  if (lwp->stopped
4358 	      && !lwp->status_pending_p
4359 	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4360 	    {
4361 	      lwp->status_pending_p = 1;
4362 
4363 	      threads_debug_printf
4364 		("Dequeueing deferred signal %d for LWP %ld, "
4365 		 "leaving status pending.",
4366 		 WSTOPSIG (lwp->status_pending),
4367 		 lwpid_of (thread));
4368 	    }
4369 
4370 	  return;
4371 	}
4372     }
4373 
4374   /* No resume action for this thread.  */
4375   lwp->resume = NULL;
4376 }
4377 
4378 bool
4379 linux_process_target::resume_status_pending (thread_info *thread)
4380 {
4381   struct lwp_info *lwp = get_thread_lwp (thread);
4382 
4383   /* LWPs which will not be resumed are not interesting, because
4384      we might not wait for them next time through linux_wait.  */
4385   if (lwp->resume == NULL)
4386     return false;
4387 
4388   return thread_still_has_status_pending (thread);
4389 }
4390 
/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that gdbserver itself must step over before the
   thread can be resumed; return false in every other case.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
			    lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, should remain stopped",
	 lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
			    lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending status.",
	 lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Cancelling, PC was changed. "
	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
	 paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      threads_debug_printf
	("Need step over [LWP %ld]? Ignoring, has pending signals.",
	 lwpid_of (thread));

      return false;
    }

  /* The breakpoint queries below need THREAD to be the current
     thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
				" GDB breakpoint at 0x%s; skipping step over",
				lwpid_of (thread), paddress (pc));

	  return false;
	}
      else
	{
	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
				"found breakpoint at 0x%s",
				lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  return true;
	}
    }

  threads_debug_printf
    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
     lwpid_of (thread), paddress (pc));

  return false;
}
4506 
/* Begin a step-over of the breakpoint at LWP's PC: stop and suspend
   all other threads, remove the breakpoint (and any fast tracepoint
   jump) at PC, resume LWP for a single step (hardware or software),
   and record in STEP_OVER_BKPT that we are waiting for this LWP's
   next event.  finish_step_over undoes the uninsertions.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  CORE_ADDR pc;

  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
			lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  threads_debug_printf ("Done stopping all threads for step-over.");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  bool step = false;
  {
    /* The breakpoint routines below operate on the current thread.  */
    scoped_restore_current_thread restore_thread;
    switch_to_thread (thread);

    lwp->bp_reinsert = pc;
    uninsert_breakpoints_at (pc);
    uninsert_fast_tracepoint_jumps_at (pc);

    step = single_step (lwp);
  }

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4549 
4550 bool
4551 linux_process_target::finish_step_over (lwp_info *lwp)
4552 {
4553   if (lwp->bp_reinsert != 0)
4554     {
4555       scoped_restore_current_thread restore_thread;
4556 
4557       threads_debug_printf ("Finished step over.");
4558 
4559       switch_to_thread (get_lwp_thread (lwp));
4560 
4561       /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
4562 	 may be no breakpoint to reinsert there by now.  */
4563       reinsert_breakpoints_at (lwp->bp_reinsert);
4564       reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4565 
4566       lwp->bp_reinsert = 0;
4567 
4568       /* Delete any single-step breakpoints.  No longer needed.  We
4569 	 don't have to worry about other threads hitting this trap,
4570 	 and later not being able to explain it, because we were
4571 	 stepping over a breakpoint, and we hold all threads but
4572 	 LWP stopped while doing that.  */
4573       if (!supports_hardware_single_step ())
4574 	{
4575 	  gdb_assert (has_single_step_breakpoints (current_thread));
4576 	  delete_single_step_breakpoints (current_thread);
4577 	}
4578 
4579       step_over_bkpt = null_ptid;
4580       return true;
4581     }
4582   else
4583     return false;
4584 }
4585 
/* If a step-over is in progress (STEP_OVER_BKPT is set), pull events
   until it completes, reinsert the stepped-over breakpoint, discard
   the resulting step SIGTRAP if GDB did not request a step, and
   unsuspend the other threads.  Judging by the debug messages, this
   is used on the detach path.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      threads_debug_printf ("detach: step over in progress, finish it first");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  threads_debug_printf ("detach: discard step-over SIGTRAP");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  /* Re-resume the LWP the way it was running before.  */
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		threads_debug_printf
		  ("detach: resume_step, not discarding step-over SIGTRAP");
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4634 
/* Carry out the resume request stored on THREAD's LWP (set up by
   linux_set_resume_request); no-op if there is none.  resume_stop
   requests queue a SIGSTOP as needed.  Other requests enqueue any
   requested signal, and then either resume the LWP or leave it
   stopped, depending on its suspend count, pending status, and
   LEAVE_ALL_STOPPED.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      threads_debug_printf ("resume_stop request for LWP %ld",
			    lwpid_of (thread));

      if (!lwp->stopped)
	{
	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.set_ignore ();
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));

  thread->last_status.set_ignore ();
  lwp->resume = NULL;
}
4732 
/* Implementation of the resume target op.  Record the resume
   requests in RESUME_INFO (N entries) on each thread, decide whether
   any thread must first step over a breakpoint or already has a
   pending status to report, and then either resume threads or leave
   them stopped accordingly.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

 THREADS_SCOPED_DEBUG_ENTER_EXIT;

  /* Stash each thread's resume request on its lwp/thread structures
     for the per-thread pass below.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (need_step_over != NULL)
    threads_debug_printf ("Not resuming all, need step over");
  else if (any_pending)
    threads_debug_printf ("Not resuming, all-stop and found "
			  "an LWP with pending status");
  else
    threads_debug_printf ("Resuming, no pending status or step over needed");

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4795 
/* Resume (or single-step) the LWP bound to THREAD, unless it has to
   stay stopped: already running, explicitly held stopped by the
   client, carrying a pending status, or suspended (e.g. for another
   LWP's step-over).  EXCEPT is an LWP to skip entirely (may be
   NULL).  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  threads_debug_printf ("lwp %ld", lwpid_of (thread));

  if (!lwp->stopped)
    {
      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
      return;
    }

  /* The client asked for this thread to stay stopped, and we already
     reported the stop to it.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
    {
      threads_debug_printf ("   client wants LWP to remain %ld stopped",
			    lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
			    lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      threads_debug_printf
	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
	 lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
			    lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* Finishing a step-over: keep stepping until past the
	 breakpoint being reinserted.  */
      threads_debug_printf ("   stepping LWP %ld, reinsert set",
			    lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
4884 
4885 void
4886 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4887 						     lwp_info *except)
4888 {
4889   struct lwp_info *lwp = get_thread_lwp (thread);
4890 
4891   if (lwp == except)
4892     return;
4893 
4894   lwp_suspended_decr (lwp);
4895 
4896   proceed_one_lwp (thread, except);
4897 }
4898 
4899 void
4900 linux_process_target::proceed_all_lwps ()
4901 {
4902   struct thread_info *need_step_over;
4903 
4904   /* If there is a thread which would otherwise be resumed, which is
4905      stopped at a breakpoint that needs stepping over, then don't
4906      resume any threads - have it step over the breakpoint with all
4907      other threads stopped, then resume all threads again.  */
4908 
4909   if (low_supports_breakpoints ())
4910     {
4911       need_step_over = find_thread ([this] (thread_info *thread)
4912 			 {
4913 			   return thread_needs_step_over (thread);
4914 			 });
4915 
4916       if (need_step_over != NULL)
4917 	{
4918 	  threads_debug_printf ("found thread %ld needing a step-over",
4919 				lwpid_of (need_step_over));
4920 
4921 	  start_step_over (get_thread_lwp (need_step_over));
4922 	  return;
4923 	}
4924     }
4925 
4926   threads_debug_printf ("Proceeding, no step-over needed");
4927 
4928   for_each_thread ([this] (thread_info *thread)
4929     {
4930       proceed_one_lwp (thread, NULL);
4931     });
4932 }
4933 
4934 void
4935 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4936 {
4937   THREADS_SCOPED_DEBUG_ENTER_EXIT;
4938 
4939   if (except)
4940     threads_debug_printf ("except=(LWP %ld)",
4941 		  lwpid_of (get_lwp_thread (except)));
4942   else
4943     threads_debug_printf ("except=nullptr");
4944 
4945   if (unsuspend)
4946     for_each_thread ([&] (thread_info *thread)
4947       {
4948 	unsuspend_and_proceed_one_lwp (thread, except);
4949       });
4950   else
4951     for_each_thread ([&] (thread_info *thread)
4952       {
4953 	proceed_one_lwp (thread, except);
4954       });
4955 }
4956 
4957 
4958 #ifdef HAVE_LINUX_REGSETS
4959 
4960 #define use_linux_regsets 1
4961 
4962 /* Returns true if REGSET has been disabled.  */
4963 
4964 static int
4965 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4966 {
4967   return (info->disabled_regsets != NULL
4968 	  && info->disabled_regsets[regset - info->regsets]);
4969 }
4970 
4971 /* Disable REGSET.  */
4972 
4973 static void
4974 disable_regset (struct regsets_info *info, struct regset_info *regset)
4975 {
4976   int dr_offset;
4977 
4978   dr_offset = regset - info->regsets;
4979   if (info->disabled_regsets == NULL)
4980     info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4981   info->disabled_regsets[dr_offset] = 1;
4982 }
4983 
/* Fetch registers from the inferior using the regset interface: one
   ptrace call per regset described in REGSETS_INFO, storing the
   results into REGCACHE.  Returns 0 if a GENERAL_REGS regset was
   read successfully, and 1 otherwise, in which case the caller falls
   back to fetching registers individually.  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty regsets and those already found unusable.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* Regsets with an NT_* type take an iovec describing the
	 buffer; the older fixed requests take the buffer pointer
	 directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace takes the data and type arguments in the
	 opposite order.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Transfer the raw buffer into the regcache.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5062 
/* Store registers from REGCACHE into the inferior via the regset
   interface.  Each regset is first read from the inferior, overlaid
   with the cached values, and written back, so kernel-side fields
   that are not tracked in the regcache are preserved.  Returns 0 if
   a GENERAL_REGS regset was written, and 1 otherwise, in which case
   the caller falls back to storing registers individually.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace takes the data and type arguments in the
	 opposite order.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5151 
5152 #else /* !HAVE_LINUX_REGSETS */
5153 
5154 #define use_linux_regsets 0
5155 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5156 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5157 
5158 #endif
5159 
5160 /* Return 1 if register REGNO is supported by one of the regset ptrace
5161    calls or 0 if it has to be transferred individually.  */
5162 
5163 static int
5164 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5165 {
5166   unsigned char mask = 1 << (regno % 8);
5167   size_t index = regno / 8;
5168 
5169   return (use_linux_regsets
5170 	  && (regs_info->regset_bitmap == NULL
5171 	      || (regs_info->regset_bitmap[index] & mask) != 0));
5172 }
5173 
5174 #ifdef HAVE_LINUX_USRREGS
5175 
5176 static int
5177 register_addr (const struct usrregs_info *usrregs, int regnum)
5178 {
5179   int addr;
5180 
5181   if (regnum < 0 || regnum >= usrregs->num_regs)
5182     error ("Invalid register number %d.", regnum);
5183 
5184   addr = usrregs->regmap[regnum];
5185 
5186   return addr;
5187 }
5188 
5189 
/* Fetch register REGNO from the inferior's USR area, one
   PTRACE_PEEKUSER word at a time, and supply it to REGCACHE.
   Registers the low target cannot fetch, or that have no usr-area
   offset (-1 in the regmap), are silently skipped; if ptrace fails,
   the register is marked unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5233 
/* Store register REGNO from REGCACHE into the inferior's USR area,
   one PTRACE_POKEUSER word at a time.  Registers the low target
   cannot store, or that have no usr-area offset, are silently
   skipped; ESRCH (process already gone) is ignored, and any other
   ptrace failure raises an error.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words and
     zero-fill the buffer so any padding bytes are deterministic.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	    /* Coerce to a uintptr_t first to avoid potential gcc warning
	       about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5285 #endif /* HAVE_LINUX_USRREGS */
5286 
/* Copy register REGNO's value from REGCACHE into BUF in the layout
   ptrace expects.  Default implementation is a plain collect;
   targets whose ptrace layout differs override this.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5293 
/* Supply register REGNO to REGCACHE from BUF, which holds the value
   in the layout ptrace produced.  Default implementation is a plain
   supply; targets whose ptrace layout differs override this.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5300 
5301 void
5302 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5303 						    regcache *regcache,
5304 						    int regno, int all)
5305 {
5306 #ifdef HAVE_LINUX_USRREGS
5307   struct usrregs_info *usr = regs_info->usrregs;
5308 
5309   if (regno == -1)
5310     {
5311       for (regno = 0; regno < usr->num_regs; regno++)
5312 	if (all || !linux_register_in_regsets (regs_info, regno))
5313 	  fetch_register (usr, regcache, regno);
5314     }
5315   else
5316     fetch_register (usr, regcache, regno);
5317 #endif
5318 }
5319 
5320 void
5321 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5322 						    regcache *regcache,
5323 						    int regno, int all)
5324 {
5325 #ifdef HAVE_LINUX_USRREGS
5326   struct usrregs_info *usr = regs_info->usrregs;
5327 
5328   if (regno == -1)
5329     {
5330       for (regno = 0; regno < usr->num_regs; regno++)
5331 	if (all || !linux_register_in_regsets (regs_info, regno))
5332 	  store_register (usr, regcache, regno);
5333     }
5334   else
5335     store_register (usr, regcache, regno);
5336 #endif
5337 }
5338 
/* Implementation of the fetch_registers target op.  Fetch all
   registers (REGNO == -1) or just REGNO into REGCACHE, trying the
   low target's hook first, then the regsets, and finally the usr
   interface for anything the regsets did not cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the low target first crack at each register.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* ALL is nonzero if the regsets failed to provide the general
	 registers, in which case usrregs must fetch everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* The low target hook returns true when it fully handled the
	 register itself.  */
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5369 
5370 void
5371 linux_process_target::store_registers (regcache *regcache, int regno)
5372 {
5373   int use_regsets;
5374   int all = 0;
5375   const regs_info *regs_info = get_regs_info ();
5376 
5377   if (regno == -1)
5378     {
5379       all = regsets_store_inferior_registers (regs_info->regsets_info,
5380 					      regcache);
5381       if (regs_info->usrregs != NULL)
5382 	usr_store_inferior_registers (regs_info, regcache, regno, all);
5383     }
5384   else
5385     {
5386       use_regsets = linux_register_in_regsets (regs_info, regno);
5387       if (use_regsets)
5388 	all = regsets_store_inferior_registers (regs_info->regsets_info,
5389 						regcache);
5390       if ((!use_regsets || all) && regs_info->usrregs != NULL)
5391 	usr_store_inferior_registers (regs_info, regcache, regno, 1);
5392     }
5393 }
5394 
/* Default low_fetch_register hook: no target-specific handling;
   return false so the generic regset/usrregs paths are used.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
5400 
5401 /* A wrapper for the read_memory target op.  */
5402 
static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  /* Forward to the current target's read_memory method.  */
  return the_target->read_memory (memaddr, myaddr, len);
}
5408 
5409 
5410 /* Helper for read_memory/write_memory using /proc/PID/mem.  Because
5411    we can use a single read/write call, this can be much more
5412    efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
5413    PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5414    One an only one of READBUF and WRITEBUF is non-null.  If READBUF is
5415    not null, then we're reading, otherwise we're writing.  */
5416 
static int
proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
		  const gdb_byte *writebuf, int len)
{
  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));

  process_info *proc = current_process ();

  /* The /proc/PID/mem descriptor is cached in the process's private
     data; -1 means it isn't available.  */
  int fd = proc->priv->mem_fd;
  if (fd == -1)
    return EIO;

  /* Loop until all LEN bytes are transferred; read/write may return
     short counts.  */
  while (len > 0)
    {
      int bytes;

      /* Use pread64/pwrite64 if available, since they save a syscall
	 and can handle 64-bit offsets even on 32-bit platforms (for
	 instance, SPARC debugging a SPARC64 application).  But only
	 use them if the offset isn't so high that when cast to off_t
	 it'd be negative, as seen on SPARC64.  pread64/pwrite64
	 outright reject such offsets.  lseek does not.  */
#ifdef HAVE_PREAD64
      if ((off_t) memaddr >= 0)
	bytes = (readbuf != nullptr
		 ? pread64 (fd, readbuf, len, memaddr)
		 : pwrite64 (fd, writebuf, len, memaddr));
      else
#endif
	{
	  bytes = -1;
	  if (lseek (fd, memaddr, SEEK_SET) != -1)
	    bytes = (readbuf != nullptr
		     ? read (fd, readbuf, len)
		     : write (fd, writebuf, len));
	}

      if (bytes < 0)
	return errno;
      else if (bytes == 0)
	{
	  /* EOF means the address space is gone, the whole process
	     exited or execed.  */
	  return EIO;
	}

      /* Advance past the chunk just transferred.  */
      memaddr += bytes;
      if (readbuf != nullptr)
	readbuf += bytes;
      else
	writebuf += bytes;
      len -= bytes;
    }

  return 0;
}
5473 
/* Copy LEN bytes from the inferior's memory at MEMADDR to debugger
   memory at MYADDR.  Returns 0 on success or an errno value on
   failure.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
}
5480 
5481 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5482    memory at MEMADDR.  On failure (cannot write to the inferior)
5483    returns the value of errno.  Always succeeds if LEN is zero.  */
5484 
5485 int
5486 linux_process_target::write_memory (CORE_ADDR memaddr,
5487 				    const unsigned char *myaddr, int len)
5488 {
5489   if (debug_threads)
5490     {
5491       /* Dump up to four bytes.  */
5492       char str[4 * 2 + 1];
5493       char *p = str;
5494       int dump = len < 4 ? len : 4;
5495 
5496       for (int i = 0; i < dump; i++)
5497 	{
5498 	  sprintf (p, "%02x", myaddr[i]);
5499 	  p += 2;
5500 	}
5501       *p = '\0';
5502 
5503       threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5504 			    str, (long) memaddr, current_process ()->pid);
5505     }
5506 
5507   return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5508 }
5509 
/* Implementation of the look_up_symbols target op.  With thread_db
   support compiled in, initialize libthread_db for the current
   process the first time this is called; otherwise a no-op.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5522 
/* Implementation of the request_interrupt target op.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  int res = ::kill (-signal_pid, SIGINT);
  if (res == -1)
    warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
	     signal_pid, safe_strerror (errno));
}
5533 
/* The Linux target can always read the inferior's auxiliary vector
   (via /proc, see read_auxv below).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5539 
5540 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5541    to debugger memory starting at MYADDR.  */
5542 
5543 int
5544 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5545 				 unsigned char *myaddr, unsigned int len)
5546 {
5547   char filename[PATH_MAX];
5548   int fd, n;
5549 
5550   xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5551 
5552   fd = open (filename, O_RDONLY);
5553   if (fd < 0)
5554     return -1;
5555 
5556   if (offset != (CORE_ADDR) 0
5557       && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5558     n = -1;
5559   else
5560     n = read (fd, myaddr, len);
5561 
5562   close (fd);
5563 
5564   return n;
5565 }
5566 
5567 int
5568 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5569 				    int size, raw_breakpoint *bp)
5570 {
5571   if (type == raw_bkpt_type_sw)
5572     return insert_memory_breakpoint (bp);
5573   else
5574     return low_insert_point (type, addr, size, bp);
5575 }
5576 
/* Default low_insert_point hook; targets with hardware
   breakpoint/watchpoint support override this.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5584 
5585 int
5586 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5587 				    int size, raw_breakpoint *bp)
5588 {
5589   if (type == raw_bkpt_type_sw)
5590     return remove_memory_breakpoint (bp);
5591   else
5592     return low_remove_point (type, addr, size, bp);
5593 }
5594 
/* Default low_remove_point hook; targets with hardware
   breakpoint/watchpoint support override this.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5602 
5603 /* Implement the stopped_by_sw_breakpoint target_ops
5604    method.  */
5605 
5606 bool
5607 linux_process_target::stopped_by_sw_breakpoint ()
5608 {
5609   struct lwp_info *lwp = get_thread_lwp (current_thread);
5610 
5611   return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5612 }
5613 
5614 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5615    method.  */
5616 
bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  /* Always advertised by the Linux target.  */
  return true;
}
5622 
5623 /* Implement the stopped_by_hw_breakpoint target_ops
5624    method.  */
5625 
5626 bool
5627 linux_process_target::stopped_by_hw_breakpoint ()
5628 {
5629   struct lwp_info *lwp = get_thread_lwp (current_thread);
5630 
5631   return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5632 }
5633 
5634 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5635    method.  */
5636 
bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  /* Always advertised by the Linux target.  */
  return true;
}
5642 
5643 /* Implement the supports_hardware_single_step target_ops method.  */
5644 
bool
linux_process_target::supports_hardware_single_step ()
{
  /* Always advertised by the Linux target.  */
  return true;
}
5650 
5651 bool
5652 linux_process_target::stopped_by_watchpoint ()
5653 {
5654   struct lwp_info *lwp = get_thread_lwp (current_thread);
5655 
5656   return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5657 }
5658 
5659 CORE_ADDR
5660 linux_process_target::stopped_data_address ()
5661 {
5662   struct lwp_info *lwp = get_thread_lwp (current_thread);
5663 
5664   return lwp->stopped_data_address;
5665 }
5666 
5667 /* This is only used for targets that define PT_TEXT_ADDR,
5668    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
5669    the target has different ways of acquiring this information, like
5670    loadmaps.  */
5671 
/* Whether read_offsets below is implemented; true only when
   SUPPORTS_READ_OFFSETS is defined (the PT_TEXT_ADDR family of
   constants is available, i.e. uClinux-style targets).  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5681 
5682 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5683    to tell gdb about.  */
5684 
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 on success with *TEXT_P/*DATA_P
   filled in, 0 on failure.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER returns the value directly, so errors can only be
     detected through errno; clear it first.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
5722 
/* TLS address lookup requires libthread_db support.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
5732 
/* Implementation of the get_tls_address target op: delegate to
   thread_db when compiled in, otherwise report failure (-1).  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5745 
/* qXfer:osdata is always available on Linux (see qxfer_osdata
   below).  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}
5751 
/* Implementation of the qxfer_osdata target op.  Note that WRITEBUF
   is not used: osdata is read-only through this interface.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5760 
5761 void
5762 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5763 				     gdb_byte *inf_siginfo, int direction)
5764 {
5765   bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5766 
5767   /* If there was no callback, or the callback didn't do anything,
5768      then just do a straight memcpy.  */
5769   if (!done)
5770     {
5771       if (direction == 1)
5772 	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5773       else
5774 	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5775     }
5776 }
5777 
/* Default implementation of the low_siginfo_fixup hook: report that
   no conversion was done, so siginfo_fixup falls back to a straight
   memcpy.  Architectures with differing 32/64-bit siginfo layouts
   override this.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
5784 
/* The qXfer:siginfo:read/write packets are always supported.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5790 
/* Read or write the siginfo of the current LWP, for the
   qXfer:siginfo packets.  Exactly one of READBUF/WRITEBUF is
   non-NULL.  Returns the number of bytes transferred, or -1 on
   error (no current thread, out-of-range OFFSET, or ptrace
   failure).  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  threads_debug_printf ("%s siginfo for lwp %d.",
			readbuf != NULL ? "Reading" : "Writing",
			pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo first in both directions; a write only
     patches LEN bytes at OFFSET into it before storing it back.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp LEN so the transfer stays within the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5840 
5841 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5842    so we notice when children change state; as the handler for the
5843    sigsuspend in my_waitpid.  */
5844 
5845 static void
5846 sigchld_handler (int signo)
5847 {
5848   int old_errno = errno;
5849 
5850   if (debug_threads)
5851     {
5852       do
5853 	{
5854 	  /* Use the async signal safe debug function.  */
5855 	  if (debug_write ("sigchld_handler\n",
5856 			   sizeof ("sigchld_handler\n") - 1) < 0)
5857 	    break; /* just ignore */
5858 	} while (0);
5859     }
5860 
5861   if (target_is_async_p ())
5862     async_file_mark (); /* trigger a linux_wait */
5863 
5864   errno = old_errno;
5865 }
5866 
/* Non-stop mode is always supported on Linux.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
5872 
/* Enable or disable async (non-blocking) target mode depending on
   ENABLE.  Returns the previous async state so callers can restore
   it.  In async mode, an event pipe registered with the event loop is
   used to wake up the main loop when a SIGCHLD arrives.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  threads_debug_printf ("async (%d), previous=%d",
			enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while the event pipe and handler registration
	 are being switched, so sigchld_handler cannot observe a
	 half-transitioned state.  */
      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (!linux_event_pipe.open_pipe ())
	    {
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe.event_fd (),
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe.event_fd ());

	  linux_event_pipe.close_pipe ();
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5919 
5920 int
5921 linux_process_target::start_non_stop (bool nonstop)
5922 {
5923   /* Register or unregister from event-loop accordingly.  */
5924   target_async (nonstop);
5925 
5926   if (target_is_async_p () != (nonstop != false))
5927     return -1;
5928 
5929   return 0;
5930 }
5931 
/* Debugging multiple processes at once is always supported.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}
5937 
/* Check if fork events are supported.  Always true on Linux.  */

bool
linux_process_target::supports_fork_events ()
{
  return true;
}
5945 
/* Check if vfork events are supported.  Always true on Linux.  */

bool
linux_process_target::supports_vfork_events ()
{
  return true;
}
5953 
/* Return the set of supported thread options.  Linux supports
   reporting both thread-clone and thread-exit events.  */

gdb_thread_options
linux_process_target::supported_thread_options ()
{
  return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
}
5961 
/* Check if exec events are supported.  Always true on Linux.  */

bool
linux_process_target::supports_exec_events ()
{
  return true;
}
5969 
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flag makes the stop handler apply the new options once
	     the stop is reported.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
5999 
/* Handle a "monitor" command from GDB.  Delegates to libthread_db's
   monitor command handling when available; otherwise reports the
   command as unhandled by returning 0.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
6009 
/* Return the CPU core PTID last ran on, via the common Linux
   implementation.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6015 
/* Disabling address space randomization is always supported.  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}
6021 
/* The in-process agent is always supported.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6027 
6028 bool
6029 linux_process_target::supports_range_stepping ()
6030 {
6031   if (supports_software_single_step ())
6032     return true;
6033 
6034   return low_supports_range_stepping ();
6035 }
6036 
/* Default implementation of the low_supports_range_stepping hook: no
   native hardware range stepping.  Architectures that have it
   override this.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6042 
/* Mapping a PID to its executable's path is always supported.  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
6048 
/* Return the path to the executable of process PID, via
   /proc/PID/exe (see linux_proc_pid_to_exec_file).  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6054 
/* Operating on files in the inferior's mount namespace is always
   supported.  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}
6060 
/* Open FILENAME in the mount namespace of process PID, with the
   close-on-exec flag set.  FLAGS and MODE are as for open(2).  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}
6067 
/* Unlink FILENAME in the mount namespace of process PID.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}
6073 
/* Read the target of symlink FILENAME in the mount namespace of
   process PID into BUF, as for readlink(2).  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6080 
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Structures mirroring the load maps the kernel hands back for
   no-MMU FDPIC/DSBT targets, retrieved via ptrace below.  */

struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map (flexible array, NSEGS entries).  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map (flexible array, NSEGS entries).  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif
6122 
/* Reading FDPIC/DSBT load maps is supported when the ptrace requests
   exist (we are inside the PT_GETDSBT/PTRACE_GETFDPIC section).  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6128 
/* Read up to LEN bytes of the load map named by ANNEX ("exec" or
   "interp") starting at OFFSET into MYADDR.  Returns the number of
   bytes copied, or -1 on error (unknown annex, ptrace failure, or
   out-of-range OFFSET).  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel fills DATA with a pointer to its load map.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET is a CORE_ADDR; if that is an unsigned type
     the "offset < 0" half of this test can never trigger — confirm.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
6161 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6162 
/* Syscall catchpoints are supported if the arch-specific hook says
   so.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return low_supports_catch_syscall ();
}
6168 
/* Default implementation of the low_supports_catch_syscall hook:
   not supported.  Architectures that can report syscall entry/exit
   override this.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6174 
6175 CORE_ADDR
6176 linux_process_target::read_pc (regcache *regcache)
6177 {
6178   if (!low_supports_breakpoints ())
6179     return 0;
6180 
6181   return low_get_pc (regcache);
6182 }
6183 
/* Store PC into REGCACHE via the arch-specific low_set_pc hook.
   Callers must only use this when breakpoints are supported.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6191 
/* Querying whether a thread is stopped is always supported.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}
6197 
/* Return true if THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6203 
6204 bool
6205 linux_process_target::any_resumed ()
6206 {
6207   bool any_resumed;
6208 
6209   auto status_pending_p_any = [&] (thread_info *thread)
6210     {
6211       return status_pending_p_callback (thread, minus_one_ptid);
6212     };
6213 
6214   auto not_stopped = [&] (thread_info *thread)
6215     {
6216       return not_stopped_callback (thread, minus_one_ptid);
6217     };
6218 
6219   /* Find a resumed LWP, if any.  */
6220   if (find_thread (status_pending_p_any) != NULL)
6221     any_resumed = 1;
6222   else if (find_thread (not_stopped) != NULL)
6223     any_resumed = 1;
6224   else
6225     any_resumed = 0;
6226 
6227   return any_resumed;
6228 }
6229 
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded to stop_all_lwps.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}
6237 
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded to unstop_all_lwps.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6246 
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   1 if /proc/PID/auxv could not be opened, 2 if AT_PHDR or AT_PHNUM
   was missing from the auxiliary vector.  IS_ELF64 selects between
   the 64-bit and 32-bit auxv entry layouts.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Scan auxv entries until both AT_PHDR and AT_PHNUM have been
     found, or the vector is exhausted.  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6312 
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   The runtime load address is recovered from the PT_PHDR entry (zero
   for regular executables, non-zero for PIE) and applied to the
   PT_DYNAMIC virtual address.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and relocate its address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6386 
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.
   The dynamic section is walked entry by entry until DT_NULL.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Buffer for reading the target-sized pointer the MIPS RLD
	     entries point at.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic section's
	     own address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6488 
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer size (4 or 8); any other size triggers an
   assertion.  Returns 0 on success (the linux_read_memory
   convention).  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6519 
/* The qXfer:libraries-svr4:read packet is always supported.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6525 
/* Byte offsets of the fields we need within the dynamic linker's
   r_debug/r_debug_extended and link_map structures, for one pointer
   size.  Concrete instances follow below.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset of r_debug_extended.r_next.  */
    int r_next_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6552 
/* Field offsets for inferiors with 4-byte pointers.  */

static const link_map_offsets lmo_32bit_offsets =
  {
    0,     /* r_version offset.  */
    4,     /* r_debug.r_map offset.  */
    20,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    4,     /* l_name offset in link_map.  */
    8,     /* l_ld offset in link_map.  */
    12,    /* l_next offset in link_map.  */
    16     /* l_prev offset in link_map.  */
  };
6564 
/* Field offsets for inferiors with 8-byte pointers.  */

static const link_map_offsets lmo_64bit_offsets =
  {
    0,     /* r_version offset.  */
    8,     /* r_debug.r_map offset.  */
    40,    /* r_debug_extended.r_next.  */
    0,     /* l_addr offset in link_map.  */
    8,     /* l_name offset in link_map.  */
    16,    /* l_ld offset in link_map.  */
    24,    /* l_next offset in link_map.  */
    32     /* l_prev offset in link_map.  */
  };
6576 
/* Get the loaded shared libraries from one namespace, appending one
   <library> XML element per entry to DOCUMENT.  LMID identifies the
   namespace; LM_ADDR is the first link_map entry to visit and
   LM_PREV the entry expected in its l_prev field (used to detect a
   corrupted list).  PTR_SIZE and LMO describe the inferior's pointer
   size and structure layout.  */

static void
read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
{
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;

  /* Walk the link_map list; stop on the first entry whose fields
     cannot be read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%s != 0x%s",
		   paddress (lm_prev), paddress (l_prev));
	  break;
	}

      /* Not checking for error because reading may stop before we've got
	 PATH_MAX worth of characters.  */
      libname[0] = '\0';
      linux_read_memory (l_name, libname, sizeof (libname) - 1);
      libname[sizeof (libname) - 1] = '\0';
      /* Entries with an empty name (e.g. the main executable) are
	 skipped.  */
      if (libname[0] != '\0')
	{
	  string_appendf (document, "<library name=\"");
	  xml_escape_text_append (document, (char *) libname);
	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
			  paddress (lm_addr), paddress (l_addr),
			  paddress (l_ld), paddress (lmid));
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }
}
6625 
/* Construct qXfer:libraries-svr4:read reply.  ANNEX may carry
   "lmid=", "start=" and "prev=" hex arguments; unknown arguments are
   skipped.  Returns the number of bytes copied into READBUF, -1 on
   error, or -2 if a write was attempted (writing is not
   supported).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;
  unsigned int machine;
  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Determine the inferior's pointer size from its executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  const link_map_offsets *lmo;
  int ptr_size;
  if (is_elf64)
    {
      lmo = &lmo_64bit_offsets;
      ptr_size = 8;
    }
  else
    {
      lmo = &lmo_32bit_offsets;
      ptr_size = 4;
    }

  /* Parse "name=value;" pairs out of the annex.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 4 && startswith (annex, "lmid"))
	addrp = &lmid;
      else if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown argument; skip to the next ';'-separated pair.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* When the starting LM_ADDR is passed in the annex, only traverse that
     namespace, which is assumed to be identified by LMID.

     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
  if (lm_addr != 0)
    {
      document += ">";
      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
    }
  else
    {
      if (lm_prev != 0)
	warning ("ignoring prev=0x%s without start", paddress (lm_prev));

      /* We could interpret LMID as 'provide only the libraries for this
	 namespace' but GDB is currently only providing lmid, start, and
	 prev, or nothing.  */
      if (lmid != 0)
	warning ("ignoring lmid=0x%s without start", paddress (lmid));

      /* The r_debug address is cached per-process once found.  */
      CORE_ADDR r_debug = priv->r_debug;
      if (r_debug == 0)
	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (r_debug == (CORE_ADDR) -1)
	return -1;

      /* Terminate the header if we end up with an empty list.  */
      if (r_debug == 0)
	document += ">";

      /* Walk the chain of r_debug namespaces via r_next (glibc's
	 r_debug_extended, r_version >= 2).  */
      while (r_debug != 0)
	{
	  int r_version = 0;
	  if (linux_read_memory (r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0)
	    {
	      warning ("unable to read r_version from 0x%s",
		       paddress (r_debug + lmo->r_version_offset));
	      break;
	    }

	  if (r_version < 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	      break;
	    }

	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%s",
		       paddress (r_debug + lmo->r_map_offset));
	      break;
	    }

	  /* We read the entire namespace.  */
	  lm_prev = 0;

	  /* The first entry corresponds to the main executable unless the
	     dynamic loader was loaded late by a static executable.  But
	     in such case the main executable does not have PT_DYNAMIC
	     present and we would not have gotten here.  */
	  if (r_debug == priv->r_debug)
	    {
	      if (lm_addr != 0)
		string_appendf (document, " main-lm=\"0x%s\">",
				paddress (lm_addr));
	      else
		document += ">";

	      lm_prev = lm_addr;
	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
				&lm_addr, ptr_size) != 0)
		{
		  warning ("unable to read l_next from 0x%s",
			   paddress (lm_addr + lmo->l_next_offset));
		  break;
		}
	    }

	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);

	  if (r_version < 2)
	    break;

	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
			    ptr_size) != 0)
	    {
	      warning ("unable to read r_next from 0x%s",
		       paddress (r_debug + lmo->r_next_offset));
	      break;
	    }
	}
    }

  document += "</library-list-svr4>";

  /* Return the window [OFFSET, OFFSET+LEN) of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6806 
6807 #ifdef HAVE_LINUX_BTRACE
6808 
/* Branch tracing is supported when built with HAVE_LINUX_BTRACE.  */

bool
linux_process_target::supports_btrace ()
{
  return true;
}
6814 
/* Enable branch tracing for thread TP with configuration CONF,
   returning the tracing handle (see linux_enable_btrace).  */

btrace_target_info *
linux_process_target::enable_btrace (thread_info *tp,
				     const btrace_config *conf)
{
  return linux_enable_btrace (tp->id, conf);
}
6821 
6822 /* See to_disable_btrace target method.  */
6823 
6824 int
6825 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6826 {
6827   enum btrace_error err;
6828 
6829   err = linux_disable_btrace (tinfo);
6830   return (err == BTRACE_ERR_NONE ? 0 : -1);
6831 }
6832 
6833 /* Encode an Intel Processor Trace configuration.  */
6834 
6835 static void
6836 linux_low_encode_pt_config (std::string *buffer,
6837 			    const struct btrace_data_pt_config *config)
6838 {
6839   *buffer += "<pt-config>\n";
6840 
6841   switch (config->cpu.vendor)
6842     {
6843     case CV_INTEL:
6844       string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6845 			  "model=\"%u\" stepping=\"%u\"/>\n",
6846 			  config->cpu.family, config->cpu.model,
6847 			  config->cpu.stepping);
6848       break;
6849 
6850     default:
6851       break;
6852     }
6853 
6854   *buffer += "</pt-config>\n";
6855 }
6856 
6857 /* Encode a raw buffer.  */
6858 
6859 static void
6860 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6861 		      unsigned int size)
6862 {
6863   if (size == 0)
6864     return;
6865 
6866   /* We use hex encoding - see gdbsupport/rsp-low.h.  */
6867   *buffer += "<raw>\n";
6868 
6869   while (size-- > 0)
6870     {
6871       char elem[2];
6872 
6873       elem[0] = tohex ((*data >> 4) & 0xf);
6874       elem[1] = tohex (*data++ & 0xf);
6875 
6876       buffer->append (elem, 2);
6877     }
6878 
6879   *buffer += "</raw>\n";
6880 }
6881 
/* See to_read_btrace target method.  Reads branch trace data of kind
   TYPE from TINFO and appends its XML rendering to *BUFFER.  Returns
   0 on success; on failure appends an "E." error string and returns
   -1.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   std::string *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	*buffer += "E.Overflow.";
      else
	*buffer += "E.Generic Error.";

      return -1;
    }

  /* Render the trace according to its format (BTS block list or raw
     Intel PT data).  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      *buffer += "E.No Trace.";
      return -1;

    case BTRACE_FORMAT_BTS:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			    paddress (block.begin), paddress (block.end));

      *buffer += "</btrace>\n";
      break;

    case BTRACE_FORMAT_PT:
      *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
      *buffer += "<btrace version=\"1.0\">\n";
      *buffer += "<pt>\n";

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      *buffer += "</pt>\n";
      *buffer += "</btrace>\n";
      break;

    default:
      *buffer += "E.Unsupported Trace Format.";
      return -1;
    }

  return 0;
}
6941 
6942 /* See to_btrace_conf target method.  */
6943 
6944 int
6945 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6946 					std::string *buffer)
6947 {
6948   const struct btrace_config *conf;
6949 
6950   *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6951   *buffer += "<btrace-conf version=\"1.0\">\n";
6952 
6953   conf = linux_btrace_conf (tinfo);
6954   if (conf != NULL)
6955     {
6956       switch (conf->format)
6957 	{
6958 	case BTRACE_FORMAT_NONE:
6959 	  break;
6960 
6961 	case BTRACE_FORMAT_BTS:
6962 	  string_xml_appendf (*buffer, "<bts");
6963 	  string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6964 	  string_xml_appendf (*buffer, " />\n");
6965 	  break;
6966 
6967 	case BTRACE_FORMAT_PT:
6968 	  string_xml_appendf (*buffer, "<pt");
6969 	  string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6970 	  string_xml_appendf (*buffer, "/>\n");
6971 	  break;
6972 	}
6973     }
6974 
6975   *buffer += "</btrace-conf>\n";
6976   return 0;
6977 }
6978 #endif /* HAVE_LINUX_BTRACE */
6979 
6980 /* See nat/linux-nat.h.  */
6981 
ptid_t
current_lwp_ptid (void)
{
  /* gdbserver tracks the current LWP via the current thread.  */
  return ptid_of (current_thread);
}
6987 
6988 /* A helper function that copies NAME to DEST, replacing non-printable
6989    characters with '?'.  Returns the original DEST as a
6990    convenience.  */
6991 
6992 static const char *
6993 replace_non_ascii (char *dest, const char *name)
6994 {
6995   const char *result = dest;
6996   while (*name != '\0')
6997     {
6998       if (!ISPRINT (*name))
6999 	*dest++ = '?';
7000       else
7001 	*dest++ = *name;
7002       ++name;
7003     }
7004   *dest = '\0';
7005   return result;
7006 }
7007 
/* Return THREAD's name as a UTF-8 string, or nullptr if no name is
   available.  The result points to a static buffer that is
   overwritten by the next call.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  static char dest[100];

  const char *name = linux_proc_tid_get_name (thread);
  if (name == nullptr)
    return nullptr;

  /* Linux limits the comm file to 16 bytes (including the trailing
     \0.  If the program or thread name is set when using a multi-byte
     encoding, this might cause it to be truncated mid-character.  In
     this situation, sending the truncated form in an XML <thread>
     response will cause a parse error in gdb.  So, instead convert
     from the locale's encoding (we can't be sure this is the correct
     encoding, but it's as good a guess as we have) to UTF-8, but in a
     way that ignores any encoding errors.  See PR remote/30618.  */
  const char *cset = nl_langinfo (CODESET);
  iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
  if (handle == (iconv_t) -1)
    /* No converter available for this locale; fall back to replacing
       non-printable bytes with '?'.  */
    return replace_non_ascii (dest, name);

  size_t inbytes = strlen (name);
  /* iconv wants a non-const input pointer; it does not modify the
     input data.  */
  char *inbuf = const_cast<char *> (name);
  size_t outbytes = sizeof (dest);
  char *outbuf = dest;
  size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);

  if (result == (size_t) -1)
    {
      /* E2BIG: the output buffer is full - point OUTBUF at the last
	 byte so the NUL below lands inside DEST.  EILSEQ/EINVAL: an
	 invalid or truncated multi-byte sequence - mark it with a
	 single '?' if there is room left for it and the NUL.  */
      if (errno == E2BIG)
	outbuf = &dest[sizeof (dest) - 1];
      else if ((errno == EILSEQ || errno == EINVAL)
	       && outbuf < &dest[sizeof (dest) - 2])
	*outbuf++ = '?';
    }
  *outbuf = '\0';

  iconv_close (handle);
  /* An empty conversion result means we have no usable name.  */
  return *dest == '\0' ? nullptr : dest;
}
7049 
7050 #if USE_THREAD_DB
/* Fetch the thread-library handle for PTID into *HANDLE and its size
   into *HANDLE_LEN, delegating to the thread_db layer.  Returns true
   on success.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
7057 #endif
7058 
7059 thread_info *
7060 linux_process_target::thread_pending_parent (thread_info *thread)
7061 {
7062   lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7063 
7064   if (parent == nullptr)
7065     return nullptr;
7066 
7067   return get_lwp_thread (parent);
7068 }
7069 
7070 thread_info *
7071 linux_process_target::thread_pending_child (thread_info *thread,
7072 					    target_waitkind *kind)
7073 {
7074   lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7075 
7076   if (child == nullptr)
7077     return nullptr;
7078 
7079   return get_lwp_thread (child);
7080 }
7081 
7082 /* Default implementation of linux_target_ops method "set_pc" for
7083    32-bit pc register which is literally named "pc".  */
7084 
7085 void
7086 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7087 {
7088   uint32_t newpc = pc;
7089 
7090   supply_register_by_name (regcache, "pc", &newpc);
7091 }
7092 
7093 /* Default implementation of linux_target_ops method "get_pc" for
7094    32-bit pc register which is literally named "pc".  */
7095 
7096 CORE_ADDR
7097 linux_get_pc_32bit (struct regcache *regcache)
7098 {
7099   uint32_t pc;
7100 
7101   collect_register_by_name (regcache, "pc", &pc);
7102   threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7103   return pc;
7104 }
7105 
7106 /* Default implementation of linux_target_ops method "set_pc" for
7107    64-bit pc register which is literally named "pc".  */
7108 
7109 void
7110 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7111 {
7112   uint64_t newpc = pc;
7113 
7114   supply_register_by_name (regcache, "pc", &newpc);
7115 }
7116 
7117 /* Default implementation of linux_target_ops method "get_pc" for
7118    64-bit pc register which is literally named "pc".  */
7119 
7120 CORE_ADDR
7121 linux_get_pc_64bit (struct regcache *regcache)
7122 {
7123   uint64_t pc;
7124 
7125   collect_register_by_name (regcache, "pc", &pc);
7126   threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7127   return pc;
7128 }
7129 
7130 /* See linux-low.h.  */
7131 
7132 int
7133 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7134 {
7135   gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7136   int offset = 0;
7137 
7138   gdb_assert (wordsize == 4 || wordsize == 8);
7139 
7140   while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7141 	 == 2 * wordsize)
7142     {
7143       if (wordsize == 4)
7144 	{
7145 	  uint32_t *data_p = (uint32_t *) data;
7146 	  if (data_p[0] == match)
7147 	    {
7148 	      *valp = data_p[1];
7149 	      return 1;
7150 	    }
7151 	}
7152       else
7153 	{
7154 	  uint64_t *data_p = (uint64_t *) data;
7155 	  if (data_p[0] == match)
7156 	    {
7157 	      *valp = data_p[1];
7158 	      return 1;
7159 	    }
7160 	}
7161 
7162       offset += 2 * wordsize;
7163     }
7164 
7165   return 0;
7166 }
7167 
7168 /* See linux-low.h.  */
7169 
7170 CORE_ADDR
7171 linux_get_hwcap (int pid, int wordsize)
7172 {
7173   CORE_ADDR hwcap = 0;
7174   linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7175   return hwcap;
7176 }
7177 
7178 /* See linux-low.h.  */
7179 
7180 CORE_ADDR
7181 linux_get_hwcap2 (int pid, int wordsize)
7182 {
7183   CORE_ADDR hwcap2 = 0;
7184   linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7185   return hwcap2;
7186 }
7187 
7188 #ifdef HAVE_LINUX_REGSETS
7189 void
7190 initialize_regsets_info (struct regsets_info *info)
7191 {
7192   for (info->num_regsets = 0;
7193        info->regsets[info->num_regsets].size >= 0;
7194        info->num_regsets++)
7195     ;
7196 }
7197 #endif
7198 
7199 void
7200 initialize_low (void)
7201 {
7202   struct sigaction sigchld_action;
7203 
7204   memset (&sigchld_action, 0, sizeof (sigchld_action));
7205   set_target_ops (the_linux_target);
7206 
7207   linux_ptrace_init_warnings ();
7208   linux_proc_init_warnings ();
7209 
7210   sigchld_action.sa_handler = sigchld_handler;
7211   sigemptyset (&sigchld_action.sa_mask);
7212   sigchld_action.sa_flags = SA_RESTART;
7213   sigaction (SIGCHLD, &sigchld_action, NULL);
7214 
7215   initialize_low_arch ();
7216 
7217   linux_check_ptrace_features ();
7218 }
7219