xref: /netbsd-src/external/gpl3/gdb/dist/gdb/linux-nat.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /* GNU/Linux native-dependent code common to multiple platforms.
2 
3    Copyright (C) 2001-2015 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "nat/linux-ptrace.h"
34 #include "nat/linux-procfs.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-child.h"
41 #include "inf-ptrace.h"
42 #include "auxv.h"
43 #include <sys/procfs.h>		/* for elf_gregset etc.  */
44 #include "elf-bfd.h"		/* for elfcore_write_* */
45 #include "gregset.h"		/* for gregset */
46 #include "gdbcore.h"		/* for get_exec_file */
47 #include <ctype.h>		/* for isdigit */
48 #include <sys/stat.h>		/* for struct stat */
49 #include <fcntl.h>		/* for O_RDONLY */
50 #include "inf-loop.h"
51 #include "event-loop.h"
52 #include "event-top.h"
53 #include <pwd.h>
54 #include <sys/types.h>
55 #include <dirent.h>
56 #include "xml-support.h"
57 #include <sys/vfs.h>
58 #include "solib.h"
59 #include "nat/linux-osdata.h"
60 #include "linux-tdep.h"
61 #include "symfile.h"
62 #include "agent.h"
63 #include "tracepoint.h"
64 #include "buffer.h"
65 #include "target-descriptions.h"
66 #include "filestuff.h"
67 #include "objfiles.h"
68 
69 #ifndef SPUFS_MAGIC
70 #define SPUFS_MAGIC 0x23c9b64e
71 #endif
72 
73 #ifdef HAVE_PERSONALITY
74 # include <sys/personality.h>
75 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
76 #  define ADDR_NO_RANDOMIZE 0x0040000
77 # endif
78 #endif /* HAVE_PERSONALITY */
79 
80 /* This comment documents high-level logic of this file.
81 
82 Waiting for events in sync mode
83 ===============================
84 
85 When waiting for an event in a specific thread, we just use waitpid, passing
86 the specific pid, and not passing WNOHANG.
87 
88 When waiting for an event in all threads, waitpid is not quite good.  Prior to
89 version 2.4, Linux can either wait for event in main thread, or in secondary
90 threads.  (2.4 has the __WALL flag).  So, if we use blocking waitpid, we might
91 miss an event.  The solution is to use non-blocking waitpid, together with
92 sigsuspend.  First, we use non-blocking waitpid to get an event in the main
process, if any.  Second, we use non-blocking waitpid with the __WCLONE
94 flag to check for events in cloned processes.  If nothing is found, we use
95 sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means something
96 happened to a child process -- and SIGCHLD will be delivered both for events
97 in main debugged process and in cloned processes.  As soon as we know there's
98 an event, we get back to calling nonblocking waitpid with and without
99 __WCLONED.
100 
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
102 so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
103 blocked, the signal becomes pending and sigsuspend immediately
104 notices it and returns.
105 
106 Waiting for events in async mode
107 ================================
108 
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options.  Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target.  We
113 detect asynchronous target events by handling SIGCHLD signals.  To
114 notify the event loop about target events, the self-pipe trick is used
115 --- a pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler writes a
118 byte to this pipe.  This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
122 
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad.  OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
128 
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop.  Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that the we never block in
134 sigsuspend.
135 
136 While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
138 return quickly.  E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
143 
144 Use of signals
145 ==============
146 
147 We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
149 so that we can intercept it.  SIGSTOP's advantage is that it can not be
150 blocked.  A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
152 
153 Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
157 
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
161 
162 We could use a real-time signal instead.  This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
166 blocked.  */
167 
168 #ifndef O_LARGEFILE
169 #define O_LARGEFILE 0
170 #endif
171 
172 /* The single-threaded native GNU/Linux target_ops.  We save a pointer for
173    the use of the multi-threaded target.  */
174 static struct target_ops *linux_ops;
175 static struct target_ops linux_ops_saved;
176 
177 /* The method to call, if any, when a new thread is attached.  */
178 static void (*linux_nat_new_thread) (struct lwp_info *);
179 
180 /* The method to call, if any, when a new fork is attached.  */
181 static linux_nat_new_fork_ftype *linux_nat_new_fork;
182 
183 /* The method to call, if any, when a process is no longer
184    attached.  */
185 static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
186 
187 /* Hook to call prior to resuming a thread.  */
188 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
189 
190 /* The method to call, if any, when the siginfo object needs to be
191    converted between the layout returned by ptrace, and the layout in
192    the architecture of the inferior.  */
193 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
194 				       gdb_byte *,
195 				       int);
196 
197 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
198    Called by our to_xfer_partial.  */
199 static target_xfer_partial_ftype *super_xfer_partial;
200 
201 /* The saved to_close method, inherited from inf-ptrace.c.
202    Called by our to_close.  */
203 static void (*super_close) (struct target_ops *);
204 
205 static unsigned int debug_linux_nat;
206 static void
207 show_debug_linux_nat (struct ui_file *file, int from_tty,
208 		      struct cmd_list_element *c, const char *value)
209 {
210   fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
211 		    value);
212 }
213 
/* A (pid, wait status) pair held in a singly linked list; see
   add_to_pid_list / in_pid_list_p / pull_pid_from_list below.  */
struct simple_pid_list
{
  int pid;			/* Process/LWP id.  */
  int status;			/* Wait status, as returned by waitpid.  */
  struct simple_pid_list *next;	/* Next element, or NULL.  */
};
/* List of stopped processes whose status has not been consumed yet.  */
struct simple_pid_list *stopped_pids;
221 
222 /* Async mode support.  */
223 
224 /* The read/write ends of the pipe registered as waitable file in the
225    event loop.  */
226 static int linux_nat_event_pipe[2] = { -1, -1 };
227 
228 /* True if we're currently in async mode.  */
229 #define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
230 
231 /* Flush the event pipe.  */
232 
233 static void
234 async_file_flush (void)
235 {
236   int ret;
237   char buf;
238 
239   do
240     {
241       ret = read (linux_nat_event_pipe[0], &buf, 1);
242     }
243   while (ret >= 0 || (ret == -1 && errno == EINTR));
244 }
245 
246 /* Put something (anything, doesn't matter what, or how much) in event
247    pipe, so that the select/poll in the event-loop realizes we have
248    something to process.  */
249 
250 static void
251 async_file_mark (void)
252 {
253   int ret;
254 
255   /* It doesn't really matter what the pipe contains, as long we end
256      up with something in it.  Might as well flush the previous
257      left-overs.  */
258   async_file_flush ();
259 
260   do
261     {
262       ret = write (linux_nat_event_pipe[1], "+", 1);
263     }
264   while (ret == -1 && errno == EINTR);
265 
266   /* Ignore EAGAIN.  If the pipe is full, the event loop will already
267      be awakened anyway.  */
268 }
269 
270 static int kill_lwp (int lwpid, int signo);
271 
272 static int stop_callback (struct lwp_info *lp, void *data);
273 
274 static void block_child_signals (sigset_t *prev_mask);
275 static void restore_child_signals_mask (sigset_t *prev_mask);
276 
277 struct lwp_info;
278 static struct lwp_info *add_lwp (ptid_t ptid);
279 static void purge_lwp_list (int pid);
280 static void delete_lwp (ptid_t ptid);
281 static struct lwp_info *find_lwp_pid (ptid_t ptid);
282 
283 static int lwp_status_pending_p (struct lwp_info *lp);
284 
285 static int check_stopped_by_breakpoint (struct lwp_info *lp);
286 static int sigtrap_is_event (int status);
287 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
288 
289 
290 /* Trivial list manipulation functions to keep track of a list of
291    new stopped processes.  */
292 static void
293 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
294 {
295   struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
296 
297   new_pid->pid = pid;
298   new_pid->status = status;
299   new_pid->next = *listp;
300   *listp = new_pid;
301 }
302 
303 static int
304 in_pid_list_p (struct simple_pid_list *list, int pid)
305 {
306   struct simple_pid_list *p;
307 
308   for (p = list; p != NULL; p = p->next)
309     if (p->pid == pid)
310       return 1;
311   return 0;
312 }
313 
314 static int
315 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
316 {
317   struct simple_pid_list **p;
318 
319   for (p = listp; *p != NULL; p = &(*p)->next)
320     if ((*p)->pid == pid)
321       {
322 	struct simple_pid_list *next = (*p)->next;
323 
324 	*statusp = (*p)->status;
325 	xfree (*p);
326 	*p = next;
327 	return 1;
328       }
329   return 0;
330 }
331 
332 /* Initialize ptrace warnings and check for supported ptrace
333    features given PID.
334 
335    ATTACHED should be nonzero iff we attached to the inferior.  */
336 
static void
linux_init_ptrace (pid_t pid, int attached)
{
  /* Turn on ptrace event reporting for PID; ATTACHED says whether we
     attached to the process rather than spawning it.  */
  linux_enable_event_reporting (pid, attached);
  /* Emit any one-time warnings about ptrace shortcomings.  */
  linux_ptrace_init_warnings ();
}
343 
/* Target hook run after attaching to PID: set up ptrace event
   reporting (ATTACHED == 1).  */
static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}
349 
/* Target hook run after starting a fresh inferior PTID: set up ptrace
   event reporting (ATTACHED == 0, we spawned it ourselves).  */
static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}
355 
356 /* Return the number of known LWPs in the tgid given by PID.  */
357 
358 static int
359 num_lwps (int pid)
360 {
361   int count = 0;
362   struct lwp_info *lp;
363 
364   for (lp = lwp_list; lp; lp = lp->next)
365     if (ptid_get_pid (lp->ptid) == pid)
366       count++;
367 
368   return count;
369 }
370 
371 /* Call delete_lwp with prototype compatible for make_cleanup.  */
372 
static void
delete_lwp_cleanup (void *lp_voidp)
{
  /* Cleanup callbacks receive their argument as void *; recover the
     lwp and remove it from the LWP list.  */
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}
381 /* Target hook for follow_fork.  On entry inferior_ptid must be the
382    ptid of the followed inferior.  At return, inferior_ptid will be
383    unchanged.  */
384 
static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      /* Pre-set to "stopped with signal 0" so the PTRACE_DETACH below
	 passes no signal if we skip the single-step/waitpid path.  */
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
		     == TARGET_WAITKIND_VFORKED);
      /* Prefer the lwp id of the followed thread; fall back to the
	 process id when the ptid carries no lwp component.  */
      parent_pid = ptid_get_lwp (inferior_ptid);
      if (parent_pid == 0)
	parent_pid = ptid_get_pid (inferior_ptid);
      child_pid
	= ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);


      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Make sure the child lwp record is discarded along with the
	     inferior_ptid restoration below.  */
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  if (!gdbarch_software_single_step_p (target_thread_architecture
						   (child_lp->ptid)))
	    {
	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	    }

	  if (WIFSTOPPED (status))
	    {
	      int signo;

	      /* Forward the stop signal to the detached child, unless
		 the user asked GDB to swallow it.  */
	      signo = WSTOPSIG (status);
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
	    }

	  /* Resets value of inferior_ptid to parent ptid.  */
	  do_cleanups (old_chain);
	}
      else
	{
	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();
	}

      /* NOTE(review): in the detach_fork branch the chain has already
	 been run just above, so this second call appears to be a
	 no-op there; it is what restores inferior_ptid on the
	 non-detach path.  Confirm against GDB cleanup semantics.  */
      do_cleanups (old_chain);

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
  	      if (debug_linux_nat)
  		fprintf_unfiltered (gdb_stdlog,
  				    "LCFF: waiting for VFORK_DONE on %d\n",
  				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

  	      if (debug_linux_nat)
  		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_is_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      /* Following the child: inferior_ptid already names it; just
	 record the lwp and give thread_db a chance to hook in.  */
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}
548 
549 
/* Fork catchpoints ride on PTRACE_EVENT_FORK reporting; fail
   (non-zero) only when the kernel lacks fork tracing.  */
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  if (linux_supports_tracefork ())
    return 0;
  return 1;
}
555 
/* Removing a fork catchpoint never fails; fork event reporting simply
   stays enabled.  */
static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}
561 
/* Vfork catchpoints ride on PTRACE_EVENT_VFORK reporting; fail
   (non-zero) only when the kernel lacks fork tracing.  */
static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  if (linux_supports_tracefork ())
    return 0;
  return 1;
}
567 
/* Removing a vfork catchpoint never fails; vfork event reporting
   simply stays enabled.  */
static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}
573 
/* Exec catchpoints ride on PTRACE_EVENT_EXEC reporting; fail
   (non-zero) only when the kernel lacks fork tracing.  */
static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  if (linux_supports_tracefork ())
    return 0;
  return 1;
}
579 
/* Removing an exec catchpoint never fails; exec event reporting
   simply stays enabled.  */
static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}
585 
static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
				    int pid, int needed, int any_count,
				    int table_size, int *table)
{
  /* On GNU/Linux the arguments are all ignored: syscall reporting can
     only be enabled globally, never selectively or turned back off,
     and the filtering against TABLE is done by GDB core rather than
     here.  Success (0) therefore only requires that the kernel
     supports PTRACE_O_TRACESYSGOOD.  */
  return linux_supports_tracesysgood () ? 0 : 1;
}
601 
602 /* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
603    are processes sharing the same VM space.  A multi-threaded process
604    is basically a group of such processes.  However, such a grouping
605    is almost entirely a user-space issue; the kernel doesn't enforce
606    such a grouping at all (this might change in the future).  In
607    general, we'll rely on the threads library (i.e. the GNU/Linux
608    Threads library) to provide such a grouping.
609 
610    It is perfectly well possible to write a multi-threaded application
611    without the assistance of a threads library, by using the clone
612    system call directly.  This module should be able to give some
613    rudimentary support for debugging such applications if developers
614    specify the CLONE_PTRACE flag in the clone system call, and are
615    using the Linux kernel 2.4 or above.
616 
617    Note that there are some peculiarities in GNU/Linux that affect
618    this code:
619 
620    - In general one should specify the __WCLONE flag to waitpid in
621      order to make it report events for any of the cloned processes
622      (and leave it out for the initial process).  However, if a cloned
623      process has exited the exit status is only reported if the
624      __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
625      we cannot use it since GDB must work on older systems too.
626 
627    - When a traced, cloned process exits and is waited for by the
628      debugger, the kernel reassigns it to the original parent and
629      keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
630      library doesn't notice this, which leads to the "zombie problem":
     When debugging a multi-threaded process that spawns a lot of
     threads, it will run out of processes, even if the threads exit,
633      because the "zombies" stay around.  */
634 
/* List of known LWPs (singly linked; see add_initial_lwp /
   delete_lwp / purge_lwp_list).  */
struct lwp_info *lwp_list;


/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make that sigsuspend work (at least SIGCHLD;
   see block_child_signals).  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;
651 
652 /* Block child signals (SIGCHLD and linux threads signals), and store
653    the previous mask in PREV_MASK.  */
654 
655 static void
656 block_child_signals (sigset_t *prev_mask)
657 {
658   /* Make sure SIGCHLD is blocked.  */
659   if (!sigismember (&blocked_mask, SIGCHLD))
660     sigaddset (&blocked_mask, SIGCHLD);
661 
662   sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
663 }
664 
665 /* Restore child signals mask, previously returned by
666    block_child_signals.  */
667 
static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  /* Reinstate exactly the mask saved by block_child_signals.  */
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
673 
674 /* Mask of signals to pass directly to the inferior.  */
675 static sigset_t pass_mask;
676 
677 /* Update signals to pass to the inferior.  */
678 static void
679 linux_nat_pass_signals (struct target_ops *self,
680 			int numsigs, unsigned char *pass_signals)
681 {
682   int signo;
683 
684   sigemptyset (&pass_mask);
685 
686   for (signo = 1; signo < NSIG; signo++)
687     {
688       int target_signo = gdb_signal_from_host (signo);
689       if (target_signo < numsigs && pass_signals[target_signo])
690         sigaddset (&pass_mask, signo);
691     }
692 }
693 
694 
695 
696 /* Prototypes for local functions.  */
697 static int stop_wait_callback (struct lwp_info *lp, void *data);
698 static int linux_thread_alive (ptid_t ptid);
699 static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
700 
701 
702 
703 /* Destroy and free LP.  */
704 
static void
lwp_free (struct lwp_info *lp)
{
  /* Release the arch-specific state first, then the lwp itself.  */
  xfree (lp->arch_private);
  xfree (lp);
}
711 
712 /* Remove all LWPs belong to PID from the lwp list.  */
713 
714 static void
715 purge_lwp_list (int pid)
716 {
717   struct lwp_info *lp, *lpprev, *lpnext;
718 
719   lpprev = NULL;
720 
721   for (lp = lwp_list; lp; lp = lpnext)
722     {
723       lpnext = lp->next;
724 
725       if (ptid_get_pid (lp->ptid) == pid)
726 	{
727 	  if (lp == lwp_list)
728 	    lwp_list = lp->next;
729 	  else
730 	    lpprev->next = lp->next;
731 
732 	  lwp_free (lp);
733 	}
734       else
735 	lpprev = lp;
736     }
737 }
738 
739 /* Add the LWP specified by PTID to the list.  PTID is the first LWP
740    in the process.  Return a pointer to the structure describing the
741    new LWP.
742 
743    This differs from add_lwp in that we don't let the arch specific
744    bits know about this new thread.  Current clients of this callback
745    take the opportunity to install watchpoints in the new thread, and
746    we shouldn't do that for the first thread.  If we're spawning a
747    child ("run"), the thread executes the shell wrapper first, and we
748    shouldn't touch it until it execs the program we want to debug.
749    For "attach", it'd be okay to call the callback, but it's not
750    necessary, because watchpoints can't yet have been inserted into
751    the inferior.  */
752 
753 static struct lwp_info *
754 add_initial_lwp (ptid_t ptid)
755 {
756   struct lwp_info *lp;
757 
758   gdb_assert (ptid_lwp_p (ptid));
759 
760   lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
761 
762   memset (lp, 0, sizeof (struct lwp_info));
763 
764   lp->last_resume_kind = resume_continue;
765   lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
766 
767   lp->ptid = ptid;
768   lp->core = -1;
769 
770   lp->next = lwp_list;
771   lwp_list = lp;
772 
773   return lp;
774 }
775 
776 /* Add the LWP specified by PID to the list.  Return a pointer to the
777    structure describing the new LWP.  The LWP should already be
778    stopped.  */
779 
780 static struct lwp_info *
781 add_lwp (ptid_t ptid)
782 {
783   struct lwp_info *lp;
784 
785   lp = add_initial_lwp (ptid);
786 
787   /* Let the arch specific bits know about this new thread.  Current
788      clients of this callback take the opportunity to install
789      watchpoints in the new thread.  We don't do this for the first
790      thread though.  See add_initial_lwp.  */
791   if (linux_nat_new_thread != NULL)
792     linux_nat_new_thread (lp);
793 
794   return lp;
795 }
796 
797 /* Remove the LWP specified by PID from the list.  */
798 
799 static void
800 delete_lwp (ptid_t ptid)
801 {
802   struct lwp_info *lp, *lpprev;
803 
804   lpprev = NULL;
805 
806   for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
807     if (ptid_equal (lp->ptid, ptid))
808       break;
809 
810   if (!lp)
811     return;
812 
813   if (lpprev)
814     lpprev->next = lp->next;
815   else
816     lwp_list = lp->next;
817 
818   lwp_free (lp);
819 }
820 
821 /* Return a pointer to the structure describing the LWP corresponding
822    to PID.  If no corresponding LWP could be found, return NULL.  */
823 
824 static struct lwp_info *
825 find_lwp_pid (ptid_t ptid)
826 {
827   struct lwp_info *lp;
828   int lwp;
829 
830   if (ptid_lwp_p (ptid))
831     lwp = ptid_get_lwp (ptid);
832   else
833     lwp = ptid_get_pid (ptid);
834 
835   for (lp = lwp_list; lp; lp = lp->next)
836     if (lwp == ptid_get_lwp (lp->ptid))
837       return lp;
838 
839   return NULL;
840 }
841 
842 /* Call CALLBACK with its second argument set to DATA for every LWP in
843    the list.  If CALLBACK returns 1 for a particular LWP, return a
844    pointer to the structure describing that LWP immediately.
845    Otherwise return NULL.  */
846 
847 struct lwp_info *
848 iterate_over_lwps (ptid_t filter,
849 		   int (*callback) (struct lwp_info *, void *),
850 		   void *data)
851 {
852   struct lwp_info *lp, *lpnext;
853 
854   for (lp = lwp_list; lp; lp = lpnext)
855     {
856       lpnext = lp->next;
857 
858       if (ptid_match (lp->ptid, filter))
859 	{
860 	  if ((*callback) (lp, data))
861 	    return lp;
862 	}
863     }
864 
865   return NULL;
866 }
867 
868 /* Update our internal state when changing from one checkpoint to
869    another indicated by NEW_PTID.  We can only switch single-threaded
870    applications, so we only create one new LWP, and the previous list
871    is discarded.  */
872 
void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Drop every LWP of the current inferior; checkpoints are
     single-threaded, so NEW_PTID will be the only one left.  */
  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
893 
894 /* Handle the exit of a single thread LP.  */
895 
896 static void
897 exit_lwp (struct lwp_info *lp)
898 {
899   struct thread_info *th = find_thread_ptid (lp->ptid);
900 
901   if (th)
902     {
903       if (print_thread_events)
904 	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
905 
906       delete_thread (lp->ptid);
907     }
908 
909   delete_lwp (lp->ptid);
910 }
911 
912 /* Wait for the LWP specified by LP, which we have just attached to.
913    Returns a wait status for that LWP, to cache.  */
914 
static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      /* *CLOUT is only ever set here, never cleared: the caller is
	 expected to have initialized *CLONED to 0.  */
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      /* Return the raw exit/termination status for the caller to
	 cache.  */
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped by something other than the SIGSTOP we expect;
	 record that for the caller (also left for it to initialize
	 to 0).  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
984 
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != ptid_get_pid (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  gdb_assert (lwpid > 0);
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);
		      /* Report "already auto-attached", whether or not
			 we actually saw the stop yet.  */
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The LWP stopped with some other signal; keep it marked
	     resumed and save the wait status so the signal is
	     reported to the core later.  */
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  return 0;
}
1103 
/* Target to_create_inferior implementation: spawn EXEC_FILE with
   arguments ALLARGS and environment ENV via the lower ptrace layer.
   If "set disable-randomization" is in effect and the host supports
   personality(2), temporarily set ADDR_NO_RANDOMIZE around the spawn
   so the child runs without address space randomization.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* Query the current personality (0xffffffff is the "read only"
	 argument) and set ADDR_NO_RANDOMIZE if it isn't on already.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      /* Re-query to verify the flag actually stuck before warning.  */
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore GDB's own personality now that the child has been
	 spawned (it inherited the modified one at fork time).  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}
1149 
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  Note that "found" is reported even if the PTRACE_ATTACH
   itself fails: the LWP was new to us either way, which tells the
   caller to rescan /proc for yet more threads.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  /* Save errno right away; the calls below may clobber it.  */
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      /* A real attach failure; warn but keep scanning.  */
	      warning (_("Cannot attach to lwp %d: %s"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      return 1;
    }
  return 0;
}
1216 
/* Target to_attach implementation: attach to the process specified by
   ARGS, wait for its initial stop (erroring out with a descriptive
   message if it exited or was killed instead), then attach to every
   other LWP of the thread group found via /proc.  */

static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      /* The lower layer's attach failed; augment its error message
	 with any kernel-level reason (e.g. ptrace_scope) we can
	 discover, then rethrow.  */
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process went away before we could observe its stop;
	 report why as precisely as the wait status allows.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1323 
/* Get pending status of LP.  On return, *STATUS holds a wait status
   encoding the signal that should be passed on to the inferior when
   it is next resumed/detached, or 0 if there is no such signal (none
   pending, or the pending one is in "nopass" state).  The return
   value is always 0.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      /* In all-stop, only the LWP that last reported an event has a
	 meaningful stop_signal recorded in its thread object.  */
      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}
1407 
/* Callback for iterate_over_lwps: detach from LP, passing along any
   pending signal.  The LWP whose id equals the overall process id is
   left attached; linux_nat_detach handles it last.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC:  Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      /* Let the arch-specific hook do any last-minute work (e.g.
	 debug register bookkeeping) before the LWP runs free.  */
      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1458 
/* Target to_detach implementation: stop and detach from every LWP of
   the current inferior.  Non-main LWPs are detached by
   detach_callback; the main LWP is detached last by the lower ptrace
   layer (or by linux_fork_detach in the multi-fork case), with any
   pending signal passed along via ARGS.  */

static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  Note that
     get_pending_status currently always returns 0, so the != -1 test
     never fails.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1519 
/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNO is not GDB_SIGNAL_0, give it that
   signal.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  ptid_t ptid;

  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  /* Give the arch-specific hook a chance to act before the LWP runs
     again; must happen before the actual resume below.  */
  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
  linux_ops->to_resume (linux_ops, ptid, step, signo);
  lp->stop_reason = LWP_STOPPED_BY_NO_REASON;
  lp->stopped = 0;
  /* The cached registers are stale now that the LWP is running.  */
  registers_changed_ptid (lp->ptid);
}
1551 
1552 /* Resume LP.  */
1553 
1554 static void
1555 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1556 {
1557   if (lp->stopped)
1558     {
1559       struct inferior *inf = find_inferior_ptid (lp->ptid);
1560 
1561       if (inf->vfork_child != NULL)
1562 	{
1563 	  if (debug_linux_nat)
1564 	    fprintf_unfiltered (gdb_stdlog,
1565 				"RC: Not resuming %s (vfork parent)\n",
1566 				target_pid_to_str (lp->ptid));
1567 	}
1568       else if (!lwp_status_pending_p (lp))
1569 	{
1570 	  if (debug_linux_nat)
1571 	    fprintf_unfiltered (gdb_stdlog,
1572 				"RC: Resuming sibling %s, %s, %s\n",
1573 				target_pid_to_str (lp->ptid),
1574 				(signo != GDB_SIGNAL_0
1575 				 ? strsignal (gdb_signal_to_host (signo))
1576 				 : "0"),
1577 				step ? "step" : "resume");
1578 
1579 	  linux_resume_one_lwp (lp, step, signo);
1580 	}
1581       else
1582 	{
1583 	  if (debug_linux_nat)
1584 	    fprintf_unfiltered (gdb_stdlog,
1585 				"RC: Not resuming sibling %s (has pending)\n",
1586 				target_pid_to_str (lp->ptid));
1587 	}
1588     }
1589   else
1590     {
1591       if (debug_linux_nat)
1592 	fprintf_unfiltered (gdb_stdlog,
1593 			    "RC: Not resuming sibling %s (not stopped)\n",
1594 			    target_pid_to_str (lp->ptid));
1595     }
1596 }
1597 
1598 /* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
1599    Resume LWP with the last stop signal, if it is in pass state.  */
1600 
1601 static int
1602 linux_nat_resume_callback (struct lwp_info *lp, void *except)
1603 {
1604   enum gdb_signal signo = GDB_SIGNAL_0;
1605 
1606   if (lp == except)
1607     return 0;
1608 
1609   if (lp->stopped)
1610     {
1611       struct thread_info *thread;
1612 
1613       thread = find_thread_ptid (lp->ptid);
1614       if (thread != NULL)
1615 	{
1616 	  signo = thread->suspend.stop_signal;
1617 	  thread->suspend.stop_signal = GDB_SIGNAL_0;
1618 	}
1619     }
1620 
1621   resume_lwp (lp, 0, signo);
1622   return 0;
1623 }
1624 
1625 static int
1626 resume_clear_callback (struct lwp_info *lp, void *data)
1627 {
1628   lp->resumed = 0;
1629   lp->last_resume_kind = resume_stop;
1630   return 0;
1631 }
1632 
1633 static int
1634 resume_set_callback (struct lwp_info *lp, void *data)
1635 {
1636   lp->resumed = 1;
1637   lp->last_resume_kind = resume_continue;
1638   return 0;
1639 }
1640 
/* Target to_resume implementation: resume the LWPs matched by PTID
   (all LWPs of a process, or one specific LWP), single-stepping if
   STEP, delivering SIGNO if it is not GDB_SIGNAL_0.  If the event
   thread already has a pending status, short-circuit and just tell
   the event loop to pick it up instead of resuming anything.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      /* If the pending stop is for a signal the user has set to
	 "pass" (and we're not stepping), it will be re-delivered on
	 resume rather than reported, so don't short-circuit: consume
	 it here and deliver it with this resume.  */
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, lp);

  /* Resume the event thread itself last.  */
  linux_resume_one_lwp (lp, step, signo);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1738 
/* Send signal SIGNO to the LWP whose kernel thread id is LWPID.  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Prefer the tkill syscall, in case we are running against nptl
     threads.  If tkill turns out to be unimplemented on this kernel
     (ENOSYS), remember that and use plain kill from then on.  */

#ifdef HAVE_TKILL_SYSCALL
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;
      tkill_unavailable = 1;
    }
#endif

  return kill (lwpid, signo);
}
1766 
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  Returns
   non-zero if the event was consumed here (LP was re-resumed), zero
   if it should be reported to the core.  STOPPING is non-zero while
   we are in the process of stopping all threads.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event will be reported.  If we didn't do this (and
	 returned 0), we'd leave a syscall entry pending, and our
	 caller, by using PTRACE_CONT to collect the SIGSTOP, skips
	 the syscall return itself.  Later, when the user re-resumes
	 this LWP, we'd see another syscall entry event and we'd
	 mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				ptid_get_lwp (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    ptid_get_lwp (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1892 
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = ptid_get_lwp (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = linux_ptrace_get_extended_event (status);

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* The kernel supplies the new child's PID in the ptrace event
	 message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      /* Record the child in the wait status so the core can relate
	 the two processes.  */
      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  if (linux_nat_new_fork != NULL)
	    linux_nat_new_fork (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_build (new_pid, new_pid, 0));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* PTRACE_EVENT_CLONE: new thread in the same process.  The
	     event itself is not reported to the core; we track the
	     new LWP ourselves.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %d, new child is LWP %ld\n",
				pid, new_pid);

	  new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    {
	      struct thread_info *tp;

	      /* When we stop for an event in some other thread, and
		 pull the thread list just as this thread has cloned,
		 we'll have seen the new thread in the thread_db list
		 before handling the CLONE event (glibc's
		 pthread_create adds the new thread to the thread list
		 before clone'ing, and has the kernel fill in the
		 thread's tid on the clone call with
		 CLONE_PARENT_SETTID).  If that happened, and the core
		 had requested the new thread to stop, we'll have
		 killed it with SIGSTOP.  But since SIGSTOP is not an
		 RT signal, it can only be queued once.  We need to be
		 careful to not resume the LWP if we wanted it to
		 stop.  In that case, we'll leave the SIGSTOP pending.
		 It will later be reported as GDB_SIGNAL_0.  */
	      tp = find_thread_ptid (new_lp->ptid);
	      if (tp != NULL && tp->stop_requested)
		new_lp->last_resume_kind = resume_stop;
	      else
		status = 0;
	    }

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (ptid_get_lwp (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		  /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
		     resume_stop.  */
		  new_lp->last_resume_kind = resume_continue;
		}
	    }

	  if (status != 0)
	    {
	      /* We created NEW_LP so it cannot yet contain STATUS.  */
	      gdb_assert (new_lp->status == 0);

	      /* Save the wait status to report later.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LHEW: waitpid of new LWP %ld, "
				    "saving status %s\n",
				    (long) ptid_get_lwp (new_lp->ptid),
				    status_to_str (status));
	      new_lp->status = status;
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      new_lp->resumed = 1;

	      if (status == 0)
		{
		  gdb_assert (new_lp->last_resume_kind == resume_continue);
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: resuming new LWP %ld\n",
					ptid_get_lwp (new_lp->ptid));
		  linux_resume_one_lwp (new_lp, 0, GDB_SIGNAL_0);
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: resuming parent LWP %d\n", pid);
	  linux_resume_one_lwp (lp, 0, GDB_SIGNAL_0);
	  return 1;
	}

      /* Fork/vfork: report the event to the core.  */
      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    ptid_get_lwp (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (NULL, pid));

      /* The thread that execed must have been resumed, but, when a
	 thread execs, it changes its tid to the tgid, and the old
	 tgid thread might have not been resumed.  */
      lp->resumed = 1;
      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				ptid_get_lwp (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    ptid_get_lwp (lp->ptid));
      ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
      return 1;
    }

  /* Any other extended event is unexpected here.  */
  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
2141 
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  May call itself recursively if the stop turns out to be a
   syscall trap or extended ptrace event that should be swallowed.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
	 was right and we should just call sigsuspend.  */

      pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
      /* Either an event arrived or the thread is known dead; stop
	 polling.  */
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 __WCLONE is not applicable for the leader so we can't use that.
	 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
	 process; it gets ESRCH both for the zombie and for running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
	  && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"WL: Thread group leader %s vanished.\n",
				target_pid_to_str (lp->ptid));
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
      sigsuspend (&suspend_mask);
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* If the ptrace options could not be set earlier, do it now that
     the LWP is known to be stopped.  */
  if (lp->must_set_ptrace_flags)
    {
      struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

      linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}
2287 
2288 /* Send a SIGSTOP to LP.  */
2289 
2290 static int
2291 stop_callback (struct lwp_info *lp, void *data)
2292 {
2293   if (!lp->stopped && !lp->signalled)
2294     {
2295       int ret;
2296 
2297       if (debug_linux_nat)
2298 	{
2299 	  fprintf_unfiltered (gdb_stdlog,
2300 			      "SC:  kill %s **<SIGSTOP>**\n",
2301 			      target_pid_to_str (lp->ptid));
2302 	}
2303       errno = 0;
2304       ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2305       if (debug_linux_nat)
2306 	{
2307 	  fprintf_unfiltered (gdb_stdlog,
2308 			      "SC:  lwp kill %d %s\n",
2309 			      ret,
2310 			      errno ? safe_strerror (errno) : "ERRNO-OK");
2311 	}
2312 
2313       lp->signalled = 1;
2314       gdb_assert (lp->status == 0);
2315     }
2316 
2317   return 0;
2318 }
2319 
/* Request a stop on LWP.  Exported wrapper around stop_callback:
   sends LWP a SIGSTOP unless it is already stopped or already has a
   SIGSTOP in flight.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp, NULL);
}
2327 
2328 /* Return non-zero if LWP PID has a pending SIGINT.  */
2329 
2330 static int
2331 linux_nat_has_pending_sigint (int pid)
2332 {
2333   sigset_t pending, blocked, ignored;
2334 
2335   linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2336 
2337   if (sigismember (&pending, SIGINT)
2338       && !sigismember (&ignored, SIGINT))
2339     return 1;
2340 
2341   return 0;
2342 }
2343 
2344 /* Set a flag in LP indicating that we should ignore its next SIGINT.  */
2345 
2346 static int
2347 set_ignore_sigint (struct lwp_info *lp, void *data)
2348 {
2349   /* If a thread has a pending SIGINT, consume it; otherwise, set a
2350      flag to consume the next one.  */
2351   if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2352       && WSTOPSIG (lp->status) == SIGINT)
2353     lp->status = 0;
2354   else
2355     lp->ignore_sigint = 1;
2356 
2357   return 0;
2358 }
2359 
2360 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2361    This function is called after we know the LWP has stopped; if the LWP
2362    stopped before the expected SIGINT was delivered, then it will never have
2363    arrived.  Also, if the signal was delivered to a shared queue and consumed
2364    by a different thread, it will never be delivered to this LWP.  */
2365 
2366 static void
2367 maybe_clear_ignore_sigint (struct lwp_info *lp)
2368 {
2369   if (!lp->ignore_sigint)
2370     return;
2371 
2372   if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2373     {
2374       if (debug_linux_nat)
2375 	fprintf_unfiltered (gdb_stdlog,
2376 			    "MCIS: Clearing bogus flag for %s\n",
2377 			    target_pid_to_str (lp->ptid));
2378       lp->ignore_sigint = 0;
2379     }
2380 }
2381 
/* Fetch the possible triggered data watchpoint info and store it in
   LP.  Returns non-zero if LP is recorded as stopped by a
   watchpoint.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *lp)
{
  struct cleanup *old_chain;

  if (linux_ops->to_stopped_by_watchpoint == NULL)
    return 0;

  /* The to_stopped_by_watchpoint hook operates on the thread
     selected by inferior_ptid; temporarily switch to LP's, restoring
     the old value via the cleanup chain below.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = lp->ptid;

  if (linux_ops->to_stopped_by_watchpoint (linux_ops))
    {
      lp->stop_reason = LWP_STOPPED_BY_WATCHPOINT;

      /* Cache the trapped data address too, if the low target can
	 report it.  */
      if (linux_ops->to_stopped_data_address != NULL)
	lp->stopped_data_address_p =
	  linux_ops->to_stopped_data_address (&current_target,
					      &lp->stopped_data_address);
      else
	lp->stopped_data_address_p = 0;
    }

  do_cleanups (old_chain);

  return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
}
2424 
2425 /* Called when the LWP stopped for a trap that could be explained by a
2426    watchpoint or a breakpoint.  */
2427 
2428 static void
2429 save_sigtrap (struct lwp_info *lp)
2430 {
2431   gdb_assert (lp->stop_reason == LWP_STOPPED_BY_NO_REASON);
2432   gdb_assert (lp->status != 0);
2433 
2434   if (check_stopped_by_watchpoint (lp))
2435     return;
2436 
2437   if (linux_nat_status_is_event (lp->status))
2438     check_stopped_by_breakpoint (lp);
2439 }
2440 
2441 /* Returns true if the LWP had stopped for a watchpoint.  */
2442 
2443 static int
2444 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2445 {
2446   struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2447 
2448   gdb_assert (lp != NULL);
2449 
2450   return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
2451 }
2452 
2453 static int
2454 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2455 {
2456   struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2457 
2458   gdb_assert (lp != NULL);
2459 
2460   *addr_p = lp->stopped_data_address;
2461 
2462   return lp->stopped_data_address_p;
2463 }
2464 
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  /* A plain SIGTRAP stop: STATUS must indicate a stopped process
     whose stop signal is SIGTRAP.  */
  if (!WIFSTOPPED (status))
    return 0;

  return WSTOPSIG (status) == SIGTRAP;
}
2472 
/* Set alternative SIGTRAP-like events recognizer.  If
   breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
   applied.  */

void
linux_nat_set_status_is_event (struct target_ops *t,
			       int (*status_is_event) (int status))
{
  /* Install the hook consulted by save_sigtrap when classifying a
     stop.  T is unused; the recognizer is global to this file.  */
  linux_nat_status_is_event = status_is_event;
}
2483 
/* Wait until LP is stopped.  Callback for iterate_over_lwps; DATA is
   unused.  Always returns 0 so iteration continues.  May recurse if
   an interrupted SIGINT has to be discarded first.  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_ptid (lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* wait_lwp returns 0 when the LWP exited; nothing left to
	 wait for.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* This is the SIGINT we asked to ignore; discard it, let
	     the LWP run again, and recurse to keep waiting for the
	     SIGSTOP.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
	  lp->stopped = 0;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) "
				"(discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Pending event %s in %s\n",
				status_to_str ((int) status),
				target_pid_to_str (lp->ptid));

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_sigtrap (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Delayed SIGSTOP caught for %s.\n",
				target_pid_to_str (lp->ptid));

	  /* Reset SIGNALLED only after the stop_wait_callback call
	     above as it does gdb_assert on SIGNALLED.  */
	  lp->signalled = 0;
	}
    }

  return 0;
}
2557 
/* Return non-zero if LP has a wait status pending.  Discard the
   pending event and resume the LWP if the event that originally
   caused the stop became uninteresting.  */

static int
status_callback (struct lwp_info *lp, void *data)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  if (!lp->resumed)
    return 0;

  if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
      || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT)
    {
      /* For breakpoint stops, double-check that the breakpoint
	 explanation is still valid: the PC may have been changed, or
	 the breakpoint may have been removed, while the event was
	 pending.  */
      struct regcache *regcache = get_thread_regcache (lp->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->status != 0);

      pc = regcache_read_pc (regcache);

      if (pc != lp->stop_pc)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SC: PC of %s changed.  was=%s, now=%s\n",
				target_pid_to_str (lp->ptid),
				paddress (target_gdbarch (), lp->stop_pc),
				paddress (target_gdbarch (), pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SC: previous breakpoint of %s, at %s gone\n",
				target_pid_to_str (lp->ptid),
				paddress (target_gdbarch (), lp->stop_pc));

	  discard = 1;
	}

      if (discard)
	{
	  /* The event became stale; throw it away and let the LWP
	     run again.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SC: pending event of %s cancelled.\n",
				target_pid_to_str (lp->ptid));

	  lp->status = 0;
	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  return 0;
	}
      return 1;
    }

  return lwp_status_pending_p (lp);
}
2619 
2620 /* Return non-zero if LP isn't stopped.  */
2621 
2622 static int
2623 running_callback (struct lwp_info *lp, void *data)
2624 {
2625   return (!lp->stopped
2626 	  || (lwp_status_pending_p (lp) && lp->resumed));
2627 }
2628 
2629 /* Count the LWP's that have had events.  */
2630 
2631 static int
2632 count_events_callback (struct lwp_info *lp, void *data)
2633 {
2634   int *count = data;
2635 
2636   gdb_assert (count != NULL);
2637 
2638   /* Select only resumed LWPs that have an event pending.  */
2639   if (lp->resumed && lwp_status_pending_p (lp))
2640     (*count)++;
2641 
2642   return 0;
2643 }
2644 
2645 /* Select the LWP (if any) that is currently being single-stepped.  */
2646 
2647 static int
2648 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2649 {
2650   if (lp->last_resume_kind == resume_step
2651       && lp->status != 0)
2652     return 1;
2653   else
2654     return 0;
2655 }
2656 
2657 /* Returns true if LP has a status pending.  */
2658 
2659 static int
2660 lwp_status_pending_p (struct lwp_info *lp)
2661 {
2662   /* We check for lp->waitstatus in addition to lp->status, because we
2663      can have pending process exits recorded in lp->status and
2664      W_EXITCODE(0,0) happens to be 0.  */
2665   return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2666 }
2667 
2668 /* Select the Nth LWP that has had a SIGTRAP event.  */
2669 
2670 static int
2671 select_event_lwp_callback (struct lwp_info *lp, void *data)
2672 {
2673   int *selector = data;
2674 
2675   gdb_assert (selector != NULL);
2676 
2677   /* Select only resumed LWPs that have an event pending.  */
2678   if (lp->resumed && lwp_status_pending_p (lp))
2679     if ((*selector)-- == 0)
2680       return 1;
2681 
2682   return 0;
2683 }
2684 
/* Called when the LWP got a signal/trap that could be explained by a
   software or hardware breakpoint.  Returns non-zero if LP's
   stop_reason was set to a breakpoint kind.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lp)
{
  /* Arrange for a breakpoint to be hit again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     this LWP, and this breakpoint will trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  struct regcache *regcache = get_thread_regcache (lp->ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;

  /* SW_BP_PC is where a software breakpoint instruction would have
     been placed, given that on some archs the reported PC is past
     the breakpoint instruction (decr_pc_after_break).  */
  pc = regcache_read_pc (regcache);
  sw_bp_pc = pc - target_decr_pc_after_break (gdbarch);

  if ((!lp->step || lp->stop_pc == sw_bp_pc)
      && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
					      sw_bp_pc))
    {
      /* The LWP was either continued, or stepped a software
	 breakpoint instruction.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "CB: Push back software breakpoint for %s\n",
			    target_pid_to_str (lp->ptid));

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      lp->stop_pc = sw_bp_pc;
      lp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
      return 1;
    }

  if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
    {
      /* Hardware breakpoints report the unadjusted PC; no rewind is
	 needed.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "CB: Push back hardware breakpoint for %s\n",
			    target_pid_to_str (lp->ptid));

      lp->stop_pc = pc;
      lp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
      return 1;
    }

  return 0;
}
2742 
/* Select one LWP out of those that have events pending, restricted
   to those matching FILTER.  On exit, *ORIG_LP points at the chosen
   LWP and *STATUS holds its wait status; the chosen LWP's own
   pending status is cleared.  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!non_stop)
    {
      event_lp = iterate_over_lwps (filter,
				    select_singlestep_lwp_callback, NULL);
      if (event_lp != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SEL: Select single-step %s\n",
				target_pid_to_str (event_lp->ptid));
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had
	 events.  Scales rand() into [0, num_events).  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (filter,
				    select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  (*ORIG_LP now points
     at the chosen LWP; its status was copied into *STATUS above.)  */
  (*orig_lp)->status = 0;
}
2809 
2810 /* Return non-zero if LP has been resumed.  */
2811 
2812 static int
2813 resumed_callback (struct lwp_info *lp, void *data)
2814 {
2815   return lp->resumed;
2816 }
2817 
/* Stop an active thread, verify it still exists, then resume it.  If
   the thread ends up with a pending status, then it is not resumed,
   and *DATA (really a pointer to int), is set.  */

static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  if (!lp->stopped)
    {
      ptid_t ptid = lp->ptid;

      /* Force a stop and wait for it to be reported.  */
      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);

      /* Resume if the lwp still exists, and the core wanted it
	 running.  The wait above may have discovered that it exited,
	 so look it up again rather than reusing LP.  */
      lp = find_lwp_pid (ptid);
      if (lp != NULL)
	{
	  if (lp->last_resume_kind == resume_stop
	      && !lwp_status_pending_p (lp))
	    {
	      /* The core wanted the LWP to stop.  Even if it stopped
		 cleanly (with SIGSTOP), leave the event pending.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: core wanted LWP %ld stopped "
				    "(leaving SIGSTOP pending)\n",
				    ptid_get_lwp (lp->ptid));
	      lp->status = W_STOPCODE (SIGSTOP);
	    }

	  if (!lwp_status_pending_p (lp))
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: re-resuming LWP %ld\n",
				    ptid_get_lwp (lp->ptid));
	      resume_lwp (lp, lp->step, GDB_SIGNAL_0);
	    }
	  else
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: not re-resuming LWP %ld "
				    "(has pending)\n",
				    ptid_get_lwp (lp->ptid));
	    }
	}
    }
  return 0;
}
2870 
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.

   LWPID and STATUS are as reported by waitpid.  Side effects: may add
   a new LWP to our list (non-leader exec after leader exit, or a stop
   from a not-yet-known child), may flush delayed SIGSTOP/SIGINT
   events, may re-resume LWPs whose events are uninteresting, and
   records the status in the returned LWP.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  /* Extended ptrace event (PTRACE_EVENT_*) encoded in STATUS, if any.  */
  int event = linux_ptrace_get_extended_event (status);

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Re-adding thread group leader LWP %d.\n",
			    lwpid);

      lp = add_lwp (ptid_build (lwpid, lwpid, 0));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* An unknown LWP stopped; remember the status so it can be
	 matched up with the corresponding fork/vfork/clone event
	 when that arrives.  */
      add_to_pid_list (&stopped_pids, lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      /* First stop seen for this LWP: turn on the ptrace event
	 reporting options now that it is stopped.  */
      struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));

      linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
	{
	  /* If this is the main thread, we must stop all threads and
	     verify if they are still alive.  This is because in the
	     nptl thread model on Linux 2.4, there is no signal issued
	     for exiting LWPs other than the main thread.  We only get
	     the main thread exit signal once all child threads have
	     already exited.  If we stop all the threads and use the
	     stop_wait_callback to check if they have exited we can
	     determine whether this signal should be ignored or
	     whether it means the end of the debugged application,
	     regardless of which threading model is being used.  */
	  if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
	    {
	      iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
				 stop_and_resume_callback, NULL);
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s exited.\n",
				target_pid_to_str (lp->ptid));

	  if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
	    {
	      /* If there is at least one more LWP, then the exit signal
		 was not the end of the debugged application and should be
		 ignored.  */
	      exit_lwp (lp);
	      return NULL;
	    }
	}

      gdb_assert (lp->resumed);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "Process %ld exited\n",
			    ptid_get_lwp (lp->ptid));

      /* This was the last lwp in the process.  Since events are
	 serialized to GDB core, we may not be able report this one
	 right now, but GDB core and the other target layers will want
	 to be notified about the exit code/signal, leave the status
	 pending for the next time we're able to report it.  */

      /* Dead LWPs aren't expected to report a pending SIGSTOP.  */
      lp->signalled = 0;

      /* Store the pending event in the waitstatus, because
	 W_EXITCODE(0,0) == 0.  */
      store_waitstatus (&lp->waitstatus, status);
      return lp;
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      lp->signalled = 0;

      if (lp->last_resume_kind != resume_stop)
	{
	  /* This is a delayed SIGSTOP.  The core didn't ask for this
	     stop, so resume the LWP and swallow the event.  */

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid));

	  gdb_assert (lp->resumed);

	  /* Discard the event.  */
	  return NULL;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      if (!non_stop)
	{
	  /* Only do the below in all-stop, as we currently use SIGSTOP
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
					      set_ignore_sigint, NULL);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
	{
	  /* Pass the signal straight through and swallow the event.  */
	  linux_resume_one_lwp (lp, lp->step, signo);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, %s (preempt 'handle')\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"));
	  return NULL;
	}
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  save_sigtrap (lp);
  return lp;
}
3151 
3152 /* Detect zombie thread group leaders, and "exit" them.  We can't reap
3153    their exits until all other threads in the group have exited.  */
3154 
3155 static void
3156 check_zombie_leaders (void)
3157 {
3158   struct inferior *inf;
3159 
3160   ALL_INFERIORS (inf)
3161     {
3162       struct lwp_info *leader_lp;
3163 
3164       if (inf->pid == 0)
3165 	continue;
3166 
3167       leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3168       if (leader_lp != NULL
3169 	  /* Check if there are other threads in the group, as we may
3170 	     have raced with the inferior simply exiting.  */
3171 	  && num_lwps (inf->pid) > 1
3172 	  && linux_proc_pid_is_zombie (inf->pid))
3173 	{
3174 	  if (debug_linux_nat)
3175 	    fprintf_unfiltered (gdb_stdlog,
3176 				"CZL: Thread group leader %d zombie "
3177 				"(it exited, or another thread execd).\n",
3178 				inf->pid);
3179 
3180 	  /* A leader zombie can mean one of two things:
3181 
3182 	     - It exited, and there's an exit status pending
3183 	     available, or only the leader exited (not the whole
3184 	     program).  In the latter case, we can't waitpid the
3185 	     leader's exit status until all other threads are gone.
3186 
3187 	     - There are 3 or more threads in the group, and a thread
3188 	     other than the leader exec'd.  On an exec, the Linux
3189 	     kernel destroys all other threads (except the execing
3190 	     one) in the thread group, and resets the execing thread's
3191 	     tid to the tgid.  No exit notification is sent for the
3192 	     execing thread -- from the ptracer's perspective, it
3193 	     appears as though the execing thread just vanishes.
3194 	     Until we reap all other threads except the leader and the
3195 	     execing thread, the leader will be zombie, and the
3196 	     execing thread will be in `D (disc sleep)'.  As soon as
3197 	     all other threads are reaped, the execing thread changes
3198 	     it's tid to the tgid, and the previous (zombie) leader
3199 	     vanishes, giving place to the "new" leader.  We could try
3200 	     distinguishing the exit and exec cases, by waiting once
3201 	     more, and seeing if something comes out, but it doesn't
3202 	     sound useful.  The previous leader _does_ go away, and
3203 	     we'll re-add the new one once we see the exec event
3204 	     (which is just the same as what would happen if the
3205 	     previous leader did exit voluntarily before some other
3206 	     thread execs).  */
3207 
3208 	  if (debug_linux_nat)
3209 	    fprintf_unfiltered (gdb_stdlog,
3210 				"CZL: Thread group leader %d vanished.\n",
3211 				inf->pid);
3212 	  exit_lwp (leader_lp);
3213 	}
3214     }
3215 }
3216 
/* The meat of target_wait for the GNU/Linux native target: drain all
   pending events out of the kernel with waitpid, filter them through
   linux_nat_filter_event, pick one event LWP (randomly among those
   with events, to avoid starvation), and report its status in
   OURSTATUS.  Returns the ptid of the event LWP, or minus_one_ptid
   when there is nothing to report (TARGET_WNOHANG / no resumed
   LWPs).  */

static ptid_t
linux_nat_wait_1 (struct target_ops *ops,
		  ptid_t ptid, struct target_waitstatus *ourstatus,
		  int target_options)
{
  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid_is_pid (inferior_ptid))
    {
      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (inferior_ptid,
			  ptid_build (ptid_get_pid (inferior_ptid),
				      ptid_get_pid (inferior_ptid), 0));

      lp = add_initial_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback, NULL);
  if (lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Using pending wait status %s for %s.\n",
			    status_to_str (lp->status),
			    target_pid_to_str (lp->ptid));
    }

  if (!target_is_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reapped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      /* Try cloned children (__WCLONE) first, then regular ones.  */
      errno = 0;
      lwpid = my_waitpid (-1, &status,  __WCLONE | WNOHANG);
      if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
	lwpid = my_waitpid (-1, &status, WNOHANG);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNW: waitpid(-1, ...) returned %d, %s\n",
			    lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  if (debug_linux_nat)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "LLW: waitpid %ld received %s\n",
				  (long) lwpid, status_to_str (status));
	    }

	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, check if
	 there's any LWP with a status to report to the core.  */
      lp = iterate_over_lwps (ptid, status_callback, NULL);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");

	  ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;

	  if (!target_is_async_p ())
	    clear_sigint_trap ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  SIGCHLD
	 was blocked above, so this cannot miss a wakeup that arrived
	 between the waitpid calls and here.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
      sigsuspend (&suspend_mask);
    }

  if (!target_is_async_p ())
    clear_sigint_trap ();

  gdb_assert (lp);

  status = lp->status;
  lp->status = 0;

  if (!non_stop)
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback, NULL);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint.  */
  if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      int decr_pc = target_decr_pc_after_break (gdbarch);

      if (decr_pc != 0)
	{
	  CORE_ADDR pc;

	  pc = regcache_read_pc (regcache);
	  regcache_write_pc (regcache, pc + decr_pc);
	}
    }

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!non_stop)
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
    }
  else
    {
      resume_clear_callback (lp, NULL);
    }

  if (linux_nat_status_is_event (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: trap ptid is %s.\n",
			    target_pid_to_str (lp->ptid));
    }

  /* A waitstatus recorded earlier (e.g. exit status stashed by
     linux_nat_filter_event) takes precedence over the raw STATUS.  */
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED
      || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return lp->ptid;
}
3455 
3456 /* Resume LWPs that are currently stopped without any pending status
3457    to report, but are resumed from the core's perspective.  */
3458 
3459 static int
3460 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3461 {
3462   ptid_t *wait_ptid_p = data;
3463 
3464   if (lp->stopped
3465       && lp->resumed
3466       && !lwp_status_pending_p (lp))
3467     {
3468       struct regcache *regcache = get_thread_regcache (lp->ptid);
3469       struct gdbarch *gdbarch = get_regcache_arch (regcache);
3470       CORE_ADDR pc = regcache_read_pc (regcache);
3471 
3472       gdb_assert (is_executing (lp->ptid));
3473 
3474       /* Don't bother if there's a breakpoint at PC that we'd hit
3475 	 immediately, and we're not waiting for this LWP.  */
3476       if (!ptid_match (lp->ptid, *wait_ptid_p))
3477 	{
3478 	  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3479 	    return 0;
3480 	}
3481 
3482       if (debug_linux_nat)
3483 	fprintf_unfiltered (gdb_stdlog,
3484 			    "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3485 			    target_pid_to_str (lp->ptid),
3486 			    paddress (gdbarch, pc),
3487 			    lp->step);
3488 
3489       linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3490     }
3491 
3492   return 0;
3493 }
3494 
3495 static ptid_t
3496 linux_nat_wait (struct target_ops *ops,
3497 		ptid_t ptid, struct target_waitstatus *ourstatus,
3498 		int target_options)
3499 {
3500   ptid_t event_ptid;
3501 
3502   if (debug_linux_nat)
3503     {
3504       char *options_string;
3505 
3506       options_string = target_options_to_string (target_options);
3507       fprintf_unfiltered (gdb_stdlog,
3508 			  "linux_nat_wait: [%s], [%s]\n",
3509 			  target_pid_to_str (ptid),
3510 			  options_string);
3511       xfree (options_string);
3512     }
3513 
3514   /* Flush the async file first.  */
3515   if (target_is_async_p ())
3516     async_file_flush ();
3517 
3518   /* Resume LWPs that are currently stopped without any pending status
3519      to report, but are resumed from the core's perspective.  LWPs get
3520      in this state if we find them stopping at a time we're not
3521      interested in reporting the event (target_wait on a
3522      specific_process, for example, see linux_nat_wait_1), and
3523      meanwhile the event became uninteresting.  Don't bother resuming
3524      LWPs we're not going to wait for if they'd stop immediately.  */
3525   if (non_stop)
3526     iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3527 
3528   event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3529 
3530   /* If we requested any event, and something came out, assume there
3531      may be more.  If we requested a specific lwp or process, also
3532      assume there may be more.  */
3533   if (target_is_async_p ()
3534       && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3535 	   && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3536 	  || !ptid_equal (ptid, minus_one_ptid)))
3537     async_file_mark ();
3538 
3539   return event_ptid;
3540 }
3541 
3542 static int
3543 kill_callback (struct lwp_info *lp, void *data)
3544 {
3545   /* PTRACE_KILL may resume the inferior.  Send SIGKILL first.  */
3546 
3547   errno = 0;
3548   kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3549   if (debug_linux_nat)
3550     {
3551       int save_errno = errno;
3552 
3553       fprintf_unfiltered (gdb_stdlog,
3554 			  "KC:  kill (SIGKILL) %s, 0, 0 (%s)\n",
3555 			  target_pid_to_str (lp->ptid),
3556 			  save_errno ? safe_strerror (save_errno) : "OK");
3557     }
3558 
3559   /* Some kernels ignore even SIGKILL for processes under ptrace.  */
3560 
3561   errno = 0;
3562   ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3563   if (debug_linux_nat)
3564     {
3565       int save_errno = errno;
3566 
3567       fprintf_unfiltered (gdb_stdlog,
3568 			  "KC:  PTRACE_KILL %s, 0, 0 (%s)\n",
3569 			  target_pid_to_str (lp->ptid),
3570 			  save_errno ? safe_strerror (save_errno) : "OK");
3571     }
3572 
3573   return 0;
3574 }
3575 
3576 static int
3577 kill_wait_callback (struct lwp_info *lp, void *data)
3578 {
3579   pid_t pid;
3580 
3581   /* We must make sure that there are no pending events (delayed
3582      SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3583      program doesn't interfere with any following debugging session.  */
3584 
3585   /* For cloned processes we must check both with __WCLONE and
3586      without, since the exit status of a cloned process isn't reported
3587      with __WCLONE.  */
3588   if (lp->cloned)
3589     {
3590       do
3591 	{
3592 	  pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3593 	  if (pid != (pid_t) -1)
3594 	    {
3595 	      if (debug_linux_nat)
3596 		fprintf_unfiltered (gdb_stdlog,
3597 				    "KWC: wait %s received unknown.\n",
3598 				    target_pid_to_str (lp->ptid));
3599 	      /* The Linux kernel sometimes fails to kill a thread
3600 		 completely after PTRACE_KILL; that goes from the stop
3601 		 point in do_fork out to the one in
3602 		 get_signal_to_deliever and waits again.  So kill it
3603 		 again.  */
3604 	      kill_callback (lp, NULL);
3605 	    }
3606 	}
3607       while (pid == ptid_get_lwp (lp->ptid));
3608 
3609       gdb_assert (pid == -1 && errno == ECHILD);
3610     }
3611 
3612   do
3613     {
3614       pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3615       if (pid != (pid_t) -1)
3616 	{
3617 	  if (debug_linux_nat)
3618 	    fprintf_unfiltered (gdb_stdlog,
3619 				"KWC: wait %s received unk.\n",
3620 				target_pid_to_str (lp->ptid));
3621 	  /* See the call to kill_callback above.  */
3622 	  kill_callback (lp, NULL);
3623 	}
3624     }
3625   while (pid == ptid_get_lwp (lp->ptid));
3626 
3627   gdb_assert (pid == -1 && errno == ECHILD);
3628   return 0;
3629 }
3630 
3631 static void
3632 linux_nat_kill (struct target_ops *ops)
3633 {
3634   struct target_waitstatus last;
3635   ptid_t last_ptid;
3636   int status;
3637 
3638   /* If we're stopped while forking and we haven't followed yet,
3639      kill the other task.  We need to do this first because the
3640      parent will be sleeping if this is a vfork.  */
3641 
3642   get_last_target_status (&last_ptid, &last);
3643 
3644   if (last.kind == TARGET_WAITKIND_FORKED
3645       || last.kind == TARGET_WAITKIND_VFORKED)
3646     {
3647       ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3648       wait (&status);
3649 
3650       /* Let the arch-specific native code know this process is
3651 	 gone.  */
3652       linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3653     }
3654 
3655   if (forks_exist_p ())
3656     linux_fork_killall ();
3657   else
3658     {
3659       ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3660 
3661       /* Stop all threads before killing them, since ptrace requires
3662 	 that the thread is stopped to sucessfully PTRACE_KILL.  */
3663       iterate_over_lwps (ptid, stop_callback, NULL);
3664       /* ... and wait until all of them have reported back that
3665 	 they're no longer running.  */
3666       iterate_over_lwps (ptid, stop_wait_callback, NULL);
3667 
3668       /* Kill all LWP's ...  */
3669       iterate_over_lwps (ptid, kill_callback, NULL);
3670 
3671       /* ... and wait until we've flushed all events.  */
3672       iterate_over_lwps (ptid, kill_wait_callback, NULL);
3673     }
3674 
3675   target_mourn_inferior ();
3676 }
3677 
3678 static void
3679 linux_nat_mourn_inferior (struct target_ops *ops)
3680 {
3681   int pid = ptid_get_pid (inferior_ptid);
3682 
3683   purge_lwp_list (pid);
3684 
3685   if (! forks_exist_p ())
3686     /* Normal case, no other forks available.  */
3687     linux_ops->to_mourn_inferior (ops);
3688   else
3689     /* Multi-fork case.  The current inferior_ptid has exited, but
3690        there are other viable forks to debug.  Delete the exiting
3691        one and context-switch to the first available.  */
3692     linux_fork_mourn_inferior ();
3693 
3694   /* Let the arch-specific native code know this process is gone.  */
3695   linux_nat_forget_process (pid);
3696 }
3697 
3698 /* Convert a native/host siginfo object, into/from the siginfo in the
3699    layout of the inferiors' architecture.  */
3700 
3701 static void
3702 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3703 {
3704   int done = 0;
3705 
3706   if (linux_nat_siginfo_fixup != NULL)
3707     done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3708 
3709   /* If there was no callback, or the callback didn't do anything,
3710      then just do a straight memcpy.  */
3711   if (!done)
3712     {
3713       if (direction == 1)
3714 	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3715       else
3716 	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3717     }
3718 }
3719 
/* Implement TARGET_OBJECT_SIGNAL_INFO transfers: read or write the
   siginfo of the current thread via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO.  OFFSET/LEN address a window into the
   (architecture-converted) siginfo; *XFERED_LEN receives the number
   of bytes actually transferred.  Returns TARGET_XFER_E_IO on any
   ptrace failure or out-of-range offset.  */

static enum target_xfer_status
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Prefer the LWP id; fall back to the pid for a not-yet-threaded
     inferior.  */
  pid = ptid_get_lwp (inferior_ptid);
  if (pid == 0)
    pid = ptid_get_pid (inferior_ptid);

  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  /* Fetch the current siginfo first even for writes, so a partial
     write only modifies the requested window.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3774 
/* to_xfer_partial implementation for the linux-nat target: routes
   TARGET_OBJECT_SIGNAL_INFO to linux_xfer_siginfo and passes all
   other objects down to the single-threaded layer beneath.  */

static enum target_xfer_status
linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
			const char *annex, gdb_byte *readbuf,
			const gdb_byte *writebuf,
			ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  struct cleanup *old_chain;
  enum target_xfer_status xfer;

  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
    return TARGET_XFER_EOF;

  old_chain = save_inferior_ptid ();

  /* The layers below expect a plain (lwpid, 0, 0) style ptid.  */
  if (ptid_lwp_p (inferior_ptid))
    inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));

  xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
				     offset, len, xfered_len);

  /* Restore the saved inferior_ptid.  */
  do_cleanups (old_chain);
  return xfer;
}
3805 
3806 static int
3807 linux_thread_alive (ptid_t ptid)
3808 {
3809   int err, tmp_errno;
3810 
3811   gdb_assert (ptid_lwp_p (ptid));
3812 
3813   /* Send signal 0 instead of anything ptrace, because ptracing a
3814      running thread errors out claiming that the thread doesn't
3815      exist.  */
3816   err = kill_lwp (ptid_get_lwp (ptid), 0);
3817   tmp_errno = errno;
3818   if (debug_linux_nat)
3819     fprintf_unfiltered (gdb_stdlog,
3820 			"LLTA: KILL(SIG0) %s (%s)\n",
3821 			target_pid_to_str (ptid),
3822 			err ? safe_strerror (tmp_errno) : "OK");
3823 
3824   if (err != 0)
3825     return 0;
3826 
3827   return 1;
3828 }
3829 
/* to_thread_alive implementation; thin wrapper over
   linux_thread_alive that discards the target_ops argument.  */

static int
linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  return linux_thread_alive (ptid);
}
3835 
3836 static char *
3837 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3838 {
3839   static char buf[64];
3840 
3841   if (ptid_lwp_p (ptid)
3842       && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3843 	  || num_lwps (ptid_get_pid (ptid)) > 1))
3844     {
3845       snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3846       return buf;
3847     }
3848 
3849   return normal_pid_to_str (ptid);
3850 }
3851 
/* Return the name of thread THR as read from
   /proc/PID/task/LWP/comm, or NULL if it cannot be determined.  The
   result points into a static buffer overwritten on the next call.  */

static char *
linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
{
  int pid = ptid_get_pid (thr->ptid);
  long lwp = ptid_get_lwp (thr->ptid);
#define FORMAT "/proc/%d/task/%ld/comm"
  char buf[sizeof (FORMAT) + 30];
  FILE *comm_file;
  char *result = NULL;

  snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
  comm_file = gdb_fopen_cloexec (buf, "r");
  if (comm_file)
    {
      /* Not exported by the kernel, so we define it here.  */
#define COMM_LEN 16
      static char line[COMM_LEN + 1];

      /* The comm file holds one line; strip its trailing newline and
	 reject an empty name.  */
      if (fgets (line, sizeof (line), comm_file))
	{
	  char *nl = strchr (line, '\n');

	  if (nl)
	    *nl = '\0';
	  if (*line != '\0')
	    result = line;
	}

      fclose (comm_file);
    }

#undef COMM_LEN
#undef FORMAT

  return result;
}
3888 
3889 /* Accepts an integer PID; Returns a string representing a file that
3890    can be opened to get the symbols for the child process.  */
3891 
3892 static char *
3893 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
3894 {
3895   static char buf[PATH_MAX];
3896   char name[PATH_MAX];
3897 
3898   xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3899   memset (buf, 0, PATH_MAX);
3900   if (readlink (name, buf, PATH_MAX - 1) <= 0)
3901     strcpy (buf, name);
3902 
3903   return buf;
3904 }
3905 
3906 /* Implement the to_xfer_partial interface for memory reads using the /proc
3907    filesystem.  Because we can use a single read() call for /proc, this
3908    can be much more efficient than banging away at PTRACE_PEEKTEXT,
3909    but it doesn't support writes.  */
3910 
3911 static enum target_xfer_status
3912 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3913 			 const char *annex, gdb_byte *readbuf,
3914 			 const gdb_byte *writebuf,
3915 			 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
3916 {
3917   LONGEST ret;
3918   int fd;
3919   char filename[64];
3920 
3921   if (object != TARGET_OBJECT_MEMORY || !readbuf)
3922     return 0;
3923 
3924   /* Don't bother for one word.  */
3925   if (len < 3 * sizeof (long))
3926     return TARGET_XFER_EOF;
3927 
3928   /* We could keep this file open and cache it - possibly one per
3929      thread.  That requires some juggling, but is even faster.  */
3930   xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3931 	     ptid_get_pid (inferior_ptid));
3932   fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
3933   if (fd == -1)
3934     return TARGET_XFER_EOF;
3935 
3936   /* If pread64 is available, use it.  It's faster if the kernel
3937      supports it (only one syscall), and it's 64-bit safe even on
3938      32-bit platforms (for instance, SPARC debugging a SPARC64
3939      application).  */
3940 #ifdef HAVE_PREAD64
3941   if (pread64 (fd, readbuf, len, offset) != len)
3942 #else
3943   if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3944 #endif
3945     ret = 0;
3946   else
3947     ret = len;
3948 
3949   close (fd);
3950 
3951   if (ret == 0)
3952     return TARGET_XFER_EOF;
3953   else
3954     {
3955       *xfered_len = ret;
3956       return TARGET_XFER_OK;
3957     }
3958 }
3959 
3960 
3961 /* Enumerate spufs IDs for process PID.  */
static LONGEST
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  LONGEST pos = 0;
  LONGEST written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  /* Walk the process's open file descriptors looking for directories
     mounted on spufs; each such fd number is an SPU context id.
     Returns the number of bytes written to BUF, or -1 on error.  */
  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* atoi returns 0 for "." and "..", which are skipped here;
	 note this also skips a literal fd 0.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Emit each id as a 4-byte integer in target byte order, but
	 only within the requested [OFFSET, OFFSET+LEN) window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4010 
4011 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4012    object type, using the /proc file system.  */
4013 
static enum target_xfer_status
linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte *readbuf,
		     const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  char buf[128];
  int fd = 0;
  int ret = -1;
  int pid = ptid_get_pid (inferior_ptid);

  /* A NULL annex means "enumerate all SPU context ids"; only reads
     make sense for that.  */
  if (!annex)
    {
      if (!readbuf)
	return TARGET_XFER_E_IO;
      else
	{
	  LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);

	  if (l < 0)
	    return TARGET_XFER_E_IO;
	  else if (l == 0)
	    return TARGET_XFER_EOF;
	  else
	    {
	      *xfered_len = (ULONGEST) l;
	      return TARGET_XFER_OK;
	    }
	}
    }

  /* Otherwise the annex names an entry below /proc/PID/fd to read or
     write directly.  */
  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
  /* NOTE(review): gdb_open_cloexec returns -1 on failure; "<= 0" also
     rejects a valid fd 0 — harmless in practice since fd 0 is
     normally occupied, but worth confirming.  */
  fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
  if (fd <= 0)
    return TARGET_XFER_E_IO;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return TARGET_XFER_EOF;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else if (readbuf)
    ret = read (fd, readbuf, (size_t) len);

  close (fd);

  /* Map the read/write result onto the xfer status protocol.  */
  if (ret < 0)
    return TARGET_XFER_E_IO;
  else if (ret == 0)
    return TARGET_XFER_EOF;
  else
    {
      *xfered_len = (ULONGEST) ret;
      return TARGET_XFER_OK;
    }
}
4074 
4075 
4076 /* Parse LINE as a signal set and add its set bits to SIGS.  */
4077 
4078 static void
4079 add_line_to_sigset (const char *line, sigset_t *sigs)
4080 {
4081   int len = strlen (line) - 1;
4082   const char *p;
4083   int signum;
4084 
4085   if (line[len] != '\n')
4086     error (_("Could not parse signal set: %s"), line);
4087 
4088   p = line;
4089   signum = len * 4;
4090   while (len-- > 0)
4091     {
4092       int digit;
4093 
4094       if (*p >= '0' && *p <= '9')
4095 	digit = *p - '0';
4096       else if (*p >= 'a' && *p <= 'f')
4097 	digit = *p - 'a' + 10;
4098       else
4099 	error (_("Could not parse signal set: %s"), line);
4100 
4101       signum -= 4;
4102 
4103       if (digit & 1)
4104 	sigaddset (sigs, signum + 1);
4105       if (digit & 2)
4106 	sigaddset (sigs, signum + 2);
4107       if (digit & 4)
4108 	sigaddset (sigs, signum + 3);
4109       if (digit & 8)
4110 	sigaddset (sigs, signum + 4);
4111 
4112       p++;
4113     }
4114 }
4115 
4116 /* Find process PID's pending signals from /proc/pid/status and set
4117    SIGS to match.  */
4118 
void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  FILE *procfile;
  char buffer[PATH_MAX], fname[PATH_MAX];
  struct cleanup *cleanup;

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
  procfile = gdb_fopen_cloexec (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);
  /* Close the file even if add_line_to_sigset throws below.  */
  cleanup = make_cleanup_fclose (procfile);

  while (fgets (buffer, PATH_MAX, procfile) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, blocked);
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, ignored);
    }

  do_cleanups (cleanup);
}
4158 
4159 static enum target_xfer_status
4160 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4161 		       const char *annex, gdb_byte *readbuf,
4162 		       const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4163 		       ULONGEST *xfered_len)
4164 {
4165   gdb_assert (object == TARGET_OBJECT_OSDATA);
4166 
4167   *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4168   if (*xfered_len == 0)
4169     return TARGET_XFER_EOF;
4170   else
4171     return TARGET_XFER_OK;
4172 }
4173 
/* to_xfer_partial for the basic GNU/Linux target: dispatches objects
   with dedicated handlers (auxv, osdata, SPU), masks memory addresses
   to the architecture's address width, then tries the fast
   /proc/PID/mem path before falling back to the inherited method.  */

static enum target_xfer_status
linux_xfer_partial (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  enum target_xfer_status xfer;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
				  offset, len, xfered_len);

  if (object == TARGET_OBJECT_SPU)
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
				offset, len, xfered_len);

  /* GDB calculates all the addresses in a possibly larger width of the
     address.  Address width needs to be masked before its final use -
     either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.

     Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */

  if (object == TARGET_OBJECT_MEMORY)
    {
      int addr_bit = gdbarch_addr_bit (target_gdbarch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;
    }

  /* Try the /proc/PID/mem fast path first; on EOF, fall through to
     the method inherited from the layer beneath (ptrace).  */
  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
				  offset, len, xfered_len);
  if (xfer != TARGET_XFER_EOF)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);
}
4216 
4217 static void
4218 cleanup_target_stop (void *arg)
4219 {
4220   ptid_t *ptid = (ptid_t *) arg;
4221 
4222   gdb_assert (arg != NULL);
4223 
4224   /* Unpause all */
4225   target_resume (*ptid, 0, GDB_SIGNAL_0);
4226 }
4227 
/* Implement to_static_tracepoint_markers_by_strid: collect the static
   tracepoint markers known to the in-process agent (optionally only
   those whose string id matches STRID), pausing all threads of the
   current inferior for the duration of the query.  */

static VEC(static_tracepoint_marker_p) *
linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
						const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  struct cleanup *old_chain;
  int pid = ptid_get_pid (inferior_ptid);
  VEC(static_tracepoint_marker_p) *markers = NULL;
  struct static_tracepoint_marker *marker = NULL;
  char *p = s;
  ptid_t ptid = ptid_build (pid, 0, 0);

  /* Pause all */
  target_stop (ptid);

  /* Ask the agent for the first chunk of marker definitions
     ("qTfSTM"); subsequent chunks are requested with "qTsSTM"
     below.  */
  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
  s[sizeof ("qTfSTM")] = 0;

  agent_run_command (pid, s, strlen (s) + 1);

  /* Free a half-built marker and unpause the threads even on error.  */
  old_chain = make_cleanup (free_current_marker, &marker);
  make_cleanup (cleanup_target_stop, &ptid);

  while (*p++ == 'm')
    {
      if (marker == NULL)
	marker = XCNEW (struct static_tracepoint_marker);

      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, marker);

	  if (strid == NULL || strcmp (strid, marker->str_id) == 0)
	    {
	      VEC_safe_push (static_tracepoint_marker_p,
			     markers, marker);
	      marker = NULL;
	    }
	  else
	    {
	      /* Not a match; release its strings and recycle the
		 object for the next definition.  */
	      release_static_tracepoint_marker (marker);
	      memset (marker, 0, sizeof (*marker));
	    }
	}
      while (*p++ == ',');	/* comma-separated list */

      /* Request the next chunk.  */
      memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
      s[sizeof ("qTsSTM")] = 0;
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  do_cleanups (old_chain);

  return markers;
}
4284 
4285 /* Create a prototype generic GNU/Linux target.  The client can override
4286    it with local methods.  */
4287 
static void
linux_target_install_ops (struct target_ops *t)
{
  /* Fork/exec/syscall catchpoints and fork following.  */
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;

  /* Save the inherited xfer method so linux_xfer_partial can chain
     to it via super_xfer_partial.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;

  t->to_static_tracepoint_markers_by_strid
    = linux_child_static_tracepoint_markers_by_strid;
}
4309 
struct target_ops *
linux_target (void)
{
  /* Start from the generic ptrace target and layer the GNU/Linux
     methods on top.  */
  struct target_ops *ops = inf_ptrace_target ();

  linux_target_install_ops (ops);

  return ops;
}
4320 
4321 struct target_ops *
4322 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4323 {
4324   struct target_ops *t;
4325 
4326   t = inf_ptrace_trad_target (register_u_offset);
4327   linux_target_install_ops (t);
4328 
4329   return t;
4330 }
4331 
4332 /* target_is_async_p implementation.  */
4333 
static int
linux_nat_is_async_p (struct target_ops *ops)
{
  /* Delegate to the shared helper (true while the event pipe
     exists).  */
  return linux_is_async_p ();
}
4339 
4340 /* target_can_async_p implementation.  */
4341 
static int
linux_nat_can_async_p (struct target_ops *ops)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  /* The setting is consulted on every call, so toggling it takes
     effect without reconnecting.  */
  return target_async_permitted;
}
4350 
/* The GNU/Linux native target always supports non-stop mode.  */

static int
linux_nat_supports_non_stop (struct target_ops *self)
{
  return 1;
}
4356 
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  Nonzero enables support; read by
   linux_nat_supports_multi_process below.  */

int linux_multi_process = 1;
4361 
/* to_supports_multi_process implementation; reports the value of the
   linux_multi_process flag above.  */

static int
linux_nat_supports_multi_process (struct target_ops *self)
{
  return linux_multi_process;
}
4367 
/* Address-space randomization can be disabled only when support for
   the personality syscall was detected at configure time.  */

static int
linux_nat_supports_disable_randomization (struct target_ops *self)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4377 
/* Nonzero while GDB (not the inferior) owns the terminal in async
   mode; guards the idempotent terminal_inferior/terminal_ours pair
   below.  */
static int async_terminal_is_ours = 1;
4379 
4380 /* target_terminal_inferior implementation.
4381 
4382    This is a wrapper around child_terminal_inferior to add async support.  */
4383 
static void
linux_nat_terminal_inferior (struct target_ops *self)
{
  /* Like target_terminal_inferior, use target_can_async_p, not
     target_is_async_p, since at this point the target is not async
     yet.  If it can async, then we know it will become async prior to
     resume.  */
  if (!target_can_async_p ())
    {
      /* Async mode is disabled.  */
      child_terminal_inferior (self);
      return;
    }

  child_terminal_inferior (self);

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop reading stdin events while the inferior owns the terminal,
     and arrange for SIGINT to be forwarded to it.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
4408 
4409 /* target_terminal_ours implementation.
4410 
4411    This is a wrapper around child_terminal_ours to add async support (and
4412    implement the target_terminal_ours vs target_terminal_ours_for_output
4413    distinction).  child_terminal_ours is currently no different than
4414    child_terminal_ours_for_output.
4415    We leave target_terminal_ours_for_output alone, leaving it to
4416    child_terminal_ours_for_output.  */
4417 
static void
linux_nat_terminal_ours (struct target_ops *self)
{
  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  child_terminal_ours (self);

  /* Idempotent: nothing more to do if GDB already owns it.  */
  if (async_terminal_is_ours)
    return;

  /* Undo what linux_nat_terminal_inferior did: stop forwarding
     SIGINT and resume listening for stdin events.  */
  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
4433 
/* Callback and opaque context registered via linux_nat_async; invoked
   from handle_target_event when the event pipe becomes readable.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;
4437 
4438 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4439    so we notice when any child changes state, and notify the
4440    event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4441    above to wait for the arrival of a SIGCHLD.  */
4442 
static void
sigchld_handler (int signo)
{
  /* Preserve errno; a signal handler must not clobber it.  */
  int old_errno = errno;

  if (debug_linux_nat)
    /* ui_file_write_async_safe is usable from a signal handler,
       unlike the regular formatted-output routines.  */
    ui_file_write_async_safe (gdb_stdlog,
			      "sigchld\n", sizeof ("sigchld\n") - 1);

  /* Only poke the event loop if the async event pipe exists.  */
  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  errno = old_errno;
}
4459 
4460 /* Callback registered with the target events file descriptor.  */
4461 
static void
handle_target_event (int error, gdb_client_data client_data)
{
  /* Forward to the callback registered via linux_nat_async.  */
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}
4467 
4468 /* Create/destroy the target events pipe.  Returns previous state.  */
4469 
static int
linux_async_pipe (int enable)
{
  int previous = linux_is_async_p ();

  if (previous != enable)
    {
      sigset_t prev_mask;

      /* Block child signals while we create/destroy the pipe, as
	 their handler writes to it.  */
      block_child_signals (&prev_mask);

      if (enable)
	{
	  if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
	    internal_error (__FILE__, __LINE__,
			    "creating event pipe failed.");

	  /* Both ends are non-blocking: the SIGCHLD handler must
	     never stall writing, nor the event loop reading.  */
	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
	}
      else
	{
	  /* -1 marks the pipe as gone; sigchld_handler checks this.  */
	  close (linux_nat_event_pipe[0]);
	  close (linux_nat_event_pipe[1]);
	  linux_nat_event_pipe[0] = -1;
	  linux_nat_event_pipe[1] = -1;
	}

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}
4505 
4506 /* target_async implementation.  */
4507 
4508 static void
4509 linux_nat_async (struct target_ops *ops,
4510 		 void (*callback) (enum inferior_event_type event_type,
4511 				   void *context),
4512 		 void *context)
4513 {
4514   if (callback != NULL)
4515     {
4516       async_client_callback = callback;
4517       async_client_context = context;
4518       if (!linux_async_pipe (1))
4519 	{
4520 	  add_file_handler (linux_nat_event_pipe[0],
4521 			    handle_target_event, NULL);
4522 	  /* There may be pending events to handle.  Tell the event loop
4523 	     to poll them.  */
4524 	  async_file_mark ();
4525 	}
4526     }
4527   else
4528     {
4529       async_client_callback = callback;
4530       async_client_context = context;
4531       delete_file_handler (linux_nat_event_pipe[0]);
4532       linux_async_pipe (0);
4533     }
4534   return;
4535 }
4536 
4537 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4538    event came out.  */
4539 
/* Iterator callback: request a stop of LWP if it is running; always
   returns 0 so iteration continues over all LWPs.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));


      /* A stop was already requested for this LWP; don't send another
	 signal.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}
4583 
4584 static void
4585 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4586 {
4587   if (non_stop)
4588     iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4589   else
4590     linux_ops->to_stop (linux_ops, ptid);
4591 }
4592 
static void
linux_nat_close (struct target_ops *self)
{
  /* Unregister from the event loop.  */
  if (linux_nat_is_async_p (self))
    linux_nat_async (self, NULL, NULL);

  /* Give the saved single-threaded layer a chance to clean up, then
     run the inherited close method.  */
  if (linux_ops->to_close)
    linux_ops->to_close (linux_ops);

  super_close (self);
}
4605 
4606 /* When requests are passed down from the linux-nat layer to the
4607    single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4608    used.  The address space pointer is stored in the inferior object,
4609    but the common code that is passed such ptid can't tell whether
4610    lwpid is a "main" process id or not (it assumes so).  We reverse
4611    look up the "main" process id from the lwp here.  */
4612 
4613 static struct address_space *
4614 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4615 {
4616   struct lwp_info *lwp;
4617   struct inferior *inf;
4618   int pid;
4619 
4620   if (ptid_get_lwp (ptid) == 0)
4621     {
4622       /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
4623 	 tgid.  */
4624       lwp = find_lwp_pid (ptid);
4625       pid = ptid_get_pid (lwp->ptid);
4626     }
4627   else
4628     {
4629       /* A (pid,lwpid,0) ptid.  */
4630       pid = ptid_get_pid (ptid);
4631     }
4632 
4633   inf = find_inferior_pid (pid);
4634   gdb_assert (inf != NULL);
4635   return inf->aspace;
4636 }
4637 
4638 /* Return the cached value of the processor core for thread PTID.  */
4639 
4640 static int
4641 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4642 {
4643   struct lwp_info *info = find_lwp_pid (ptid);
4644 
4645   if (info)
4646     return info->core;
4647   return -1;
4648 }
4649 
/* Install the multi-threaded GNU/Linux methods on top of the
   single-threaded target T and register it.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Methods for async support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;

  /* Save the inherited close method so linux_nat_close can chain to
     it.  */
  super_close = t->to_close;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
4706 
4707 /* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t,
			  void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  /* The previous hook, if any, is silently replaced.  */
  linux_nat_new_thread = new_thread;
}
4717 
4718 /* See declaration in linux-nat.h.  */
4719 
void
linux_nat_set_new_fork (struct target_ops *t,
			linux_nat_new_fork_ftype *new_fork)
{
  /* Save the pointer; T is unused for the same reason as in
     linux_nat_set_new_thread.  */
  linux_nat_new_fork = new_fork;
}
4727 
4728 /* See declaration in linux-nat.h.  */
4729 
void
linux_nat_set_forget_process (struct target_ops *t,
			      linux_nat_forget_process_ftype *fn)
{
  /* Save the pointer; invoked later via linux_nat_forget_process.  */
  linux_nat_forget_process_hook = fn;
}
4737 
4738 /* See declaration in linux-nat.h.  */
4739 
void
linux_nat_forget_process (pid_t pid)
{
  /* Notify the arch-specific native code, if it registered a hook.  */
  if (linux_nat_forget_process_hook != NULL)
    linux_nat_forget_process_hook (pid);
}
4746 
4747 /* Register a method that converts a siginfo object between the layout
4748    that ptrace returns, and the layout in the architecture of the
4749    inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (siginfo_t *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer; consulted by siginfo_fixup above.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
4759 
4760 /* Register a method to call prior to resuming a thread.  */
4761 
void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer; called before each thread resume.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}
4769 
4770 /* See linux-nat.h.  */
4771 
4772 int
4773 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4774 {
4775   int pid;
4776 
4777   pid = ptid_get_lwp (ptid);
4778   if (pid == 0)
4779     pid = ptid_get_pid (ptid);
4780 
4781   errno = 0;
4782   ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4783   if (errno != 0)
4784     {
4785       memset (siginfo, 0, sizeof (*siginfo));
4786       return 0;
4787     }
4788   return 1;
4789 }
4790 
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

/* Module initializer, run once at GDB startup: registers the
   "set/show debug lin-lwp" maintenance command and sets up the
   SIGCHLD handling state used by the event loop.  The order of the
   sigprocmask/sigaction calls below matters: masks are snapshotted
   before the handler is installed.  */

void
_initialize_linux_nat (void)
{
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  SA_RESTART keeps interrupted system
     calls from failing with EINTR when a child-status signal
     arrives.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
     support read-only process state.  */
  linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
				     | PTRACE_O_TRACEVFORKDONE
				     | PTRACE_O_TRACEVFORK
				     | PTRACE_O_TRACEFORK
				     | PTRACE_O_TRACEEXEC);
}
4831 
4832 
4833 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4834    the GNU/Linux Threads library and therefore doesn't really belong
4835    here.  */
4836 
4837 /* Read variable NAME in the target and return its value if found.
4838    Otherwise return zero.  It is assumed that the type of the variable
4839    is `int'.  */
4840 
4841 static int
4842 get_signo (const char *name)
4843 {
4844   struct bound_minimal_symbol ms;
4845   int signo;
4846 
4847   ms = lookup_minimal_symbol (name, NULL, NULL);
4848   if (ms.minsym == NULL)
4849     return 0;
4850 
4851   if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4852 			  sizeof (signo)) != 0)
4853     return 0;
4854 
4855   return signo;
4856 }
4857 
/* Return the set of signals used by the threads library in *SET.
   Side effects: installs sigchld_handler for the "cancel" signal,
   adds it to the global blocked_mask (and blocks it), and removes it
   from suspend_mask so it can be delivered during sigsuspend.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Ask the inferior's threads library which signal numbers it uses,
     by reading its internal variables; get_signo returns 0 when the
     symbol is absent.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
4904