xref: /netbsd-src/external/gpl3/gdb/dist/gdb/linux-nat.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /* GNU/Linux native-dependent code common to multiple platforms.
2 
3    Copyright (C) 2001-2013 Free Software Foundation, Inc.
4 
5    This file is part of GDB.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 #include "defs.h"
21 #include "inferior.h"
22 #include "target.h"
23 #include "gdb_string.h"
24 #include "gdb_wait.h"
25 #include "gdb_assert.h"
26 #ifdef HAVE_TKILL_SYSCALL
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #endif
30 #include <sys/ptrace.h>
31 #include "linux-nat.h"
32 #include "linux-ptrace.h"
33 #include "linux-procfs.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h>		/* for MAXPATHLEN */
43 #include <sys/procfs.h>		/* for elf_gregset etc.  */
44 #include "elf-bfd.h"		/* for elfcore_write_* */
45 #include "gregset.h"		/* for gregset */
46 #include "gdbcore.h"		/* for get_exec_file */
47 #include <ctype.h>		/* for isdigit */
48 #include "gdbthread.h"		/* for struct thread_info etc.  */
49 #include "gdb_stat.h"		/* for struct stat */
50 #include <fcntl.h>		/* for O_RDONLY */
51 #include "inf-loop.h"
52 #include "event-loop.h"
53 #include "event-top.h"
54 #include <pwd.h>
55 #include <sys/types.h>
56 #include "gdb_dirent.h"
57 #include "xml-support.h"
58 #include "terminal.h"
59 #include <sys/vfs.h>
60 #include "solib.h"
61 #include "linux-osdata.h"
62 #include "linux-tdep.h"
63 #include "symfile.h"
64 #include "agent.h"
65 #include "tracepoint.h"
66 #include "exceptions.h"
67 #include "linux-ptrace.h"
68 #include "buffer.h"
69 #include "target-descriptions.h"
70 
71 #ifndef SPUFS_MAGIC
72 #define SPUFS_MAGIC 0x23c9b64e
73 #endif
74 
75 #ifdef HAVE_PERSONALITY
76 # include <sys/personality.h>
77 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
78 #  define ADDR_NO_RANDOMIZE 0x0040000
79 # endif
80 #endif /* HAVE_PERSONALITY */
81 
82 /* This comment documents high-level logic of this file.
83 
84 Waiting for events in sync mode
85 ===============================
86 
87 When waiting for an event in a specific thread, we just use waitpid, passing
88 the specific pid, and not passing WNOHANG.
89 
90 When waiting for an event in all threads, waitpid is not quite good.  Prior to
91 version 2.4, Linux can either wait for event in main thread, or in secondary
92 threads.  (2.4 has the __WALL flag).  So, if we use blocking waitpid, we might
93 miss an event.  The solution is to use non-blocking waitpid, together with
94 sigsuspend.  First, we use non-blocking waitpid to get an event in the main
95 process, if any.  Second, we use non-blocking waitpid with the __WCLONE
96 flag to check for events in cloned processes.  If nothing is found, we use
97 sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means something
98 happened to a child process -- and SIGCHLD will be delivered both for events
99 in main debugged process and in cloned processes.  As soon as we know there's
100 an event, we get back to calling nonblocking waitpid with and without
101 __WCLONE.
102 
103 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
104 so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
105 blocked, the signal becomes pending and sigsuspend immediately
106 notices it and returns.
107 
108 Waiting for events in async mode
109 ================================
110 
111 In async mode, GDB should always be ready to handle both user input
112 and target events, so neither blocking waitpid nor sigsuspend are
113 viable options.  Instead, we should asynchronously notify the GDB main
114 event loop whenever there's an unprocessed event from the target.  We
115 detect asynchronous target events by handling SIGCHLD signals.  To
116 notify the event loop about target events, the self-pipe trick is used
117 --- a pipe is registered as waitable event source in the event loop,
118 the event loop select/poll's on the read end of this pipe (as well on
119 other event sources, e.g., stdin), and the SIGCHLD handler writes a
120 byte to this pipe.  This is more portable than relying on
121 pselect/ppoll, since on kernels that lack those syscalls, libc
122 emulates them with select/poll+sigprocmask, and that is racy
123 (a.k.a. plain broken).
124 
125 Obviously, if we fail to notify the event loop if there's a target
126 event, it's bad.  OTOH, if we notify the event loop when there's no
127 event from the target, linux_nat_wait will detect that there's no real
128 event to report, and return event of type TARGET_WAITKIND_IGNORE.
129 This is mostly harmless, but it will waste time and is better avoided.
130 
131 The main design point is that every time GDB is outside linux-nat.c,
132 we have a SIGCHLD handler installed that is called when something
133 happens to the target and notifies the GDB event loop.  Whenever GDB
134 core decides to handle the event, and calls into linux-nat.c, we
135 process things as in sync mode, except that we never block in
136 sigsuspend.
137 
138 While processing an event, we may end up momentarily blocked in
139 waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
140 return quickly.  E.g., in all-stop mode, before reporting to the core
141 that an LWP hit a breakpoint, all LWPs are stopped by sending them
142 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
143 Note that this is different from blocking indefinitely waiting for the
144 next event --- here, we're already handling an event.
145 
146 Use of signals
147 ==============
148 
149 We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
150 signal is not entirely significant; we just need a signal to be delivered,
151 so that we can intercept it.  SIGSTOP's advantage is that it can not be
152 blocked.  A disadvantage is that it is not a real-time signal, so it can only
153 be queued once; we do not keep track of other sources of SIGSTOP.
154 
155 Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
156 use them, because they have special behavior when the signal is generated -
157 not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
158 kills the entire thread group.
159 
160 A delivered SIGSTOP would stop the entire thread group, not just the thread we
161 tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
162 cancel it (by PTRACE_CONT without passing SIGSTOP).
163 
164 We could use a real-time signal instead.  This would solve those problems; we
165 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
166 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
167 generates it, and there are races with trying to find a signal that is not
168 blocked.  */
169 
170 #ifndef O_LARGEFILE
171 #define O_LARGEFILE 0
172 #endif
173 
174 /* Unlike other extended result codes, WSTOPSIG (status) on
175    PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
176    instead SIGTRAP with bit 7 set.  */
177 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
178 
/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

/* Nonzero enables debug output from this module (reported by
   show_debug_linux_nat below).  */
static unsigned int debug_linux_nat;
/* "Show" callback for the debug_linux_nat setting: print whether
   debug output from this module is enabled.  VALUE is the current
   setting already rendered as a string by the command machinery.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
220 
/* Node of a singly-linked list pairing a pid with its waitpid status;
   used to keep track of stopped processes whose events have not yet
   been consumed (see add_to_pid_list / pull_pid_from_list below).  */
struct simple_pid_list
{
  int pid;			/* Process (or LWP) id.  */
  int status;			/* Status as returned by waitpid.  */
  struct simple_pid_list *next;	/* Next node, or NULL at the tail.  */
};
/* Head of the list of new stopped processes.  */
struct simple_pid_list *stopped_pids;
228 
/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  Set as a side effect of
   linux_test_for_tracefork, together with linux_supports_tracefork_flag.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the current used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Both ends start out (and remain, until async mode sets
   the pipe up) as -1.  */
static int linux_nat_event_pipe[2] = { -1, -1 };
252 
253 /* Flush the event pipe.  */
254 
255 static void
256 async_file_flush (void)
257 {
258   int ret;
259   char buf;
260 
261   do
262     {
263       ret = read (linux_nat_event_pipe[0], &buf, 1);
264     }
265   while (ret >= 0 || (ret == -1 && errno == EINTR));
266 }
267 
268 /* Put something (anything, doesn't matter what, or how much) in event
269    pipe, so that the select/poll in the event-loop realizes we have
270    something to process.  */
271 
272 static void
273 async_file_mark (void)
274 {
275   int ret;
276 
277   /* It doesn't really matter what the pipe contains, as long we end
278      up with something in it.  Might as well flush the previous
279      left-overs.  */
280   async_file_flush ();
281 
282   do
283     {
284       ret = write (linux_nat_event_pipe[1], "+", 1);
285     }
286   while (ret == -1 && errno == EINTR);
287 
288   /* Ignore EAGAIN.  If the pipe is full, the event loop will already
289      be awakened anyway.  */
290 }
291 
292 static void linux_nat_async (void (*callback)
293 			     (enum inferior_event_type event_type,
294 			      void *context),
295 			     void *context);
296 static int kill_lwp (int lwpid, int signo);
297 
298 static int stop_callback (struct lwp_info *lp, void *data);
299 
300 static void block_child_signals (sigset_t *prev_mask);
301 static void restore_child_signals_mask (sigset_t *prev_mask);
302 
303 struct lwp_info;
304 static struct lwp_info *add_lwp (ptid_t ptid);
305 static void purge_lwp_list (int pid);
306 static void delete_lwp (ptid_t ptid);
307 static struct lwp_info *find_lwp_pid (ptid_t ptid);
308 
309 
310 /* Trivial list manipulation functions to keep track of a list of
311    new stopped processes.  */
312 static void
313 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
314 {
315   struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
316 
317   new_pid->pid = pid;
318   new_pid->status = status;
319   new_pid->next = *listp;
320   *listp = new_pid;
321 }
322 
323 static int
324 in_pid_list_p (struct simple_pid_list *list, int pid)
325 {
326   struct simple_pid_list *p;
327 
328   for (p = list; p != NULL; p = p->next)
329     if (p->pid == pid)
330       return 1;
331   return 0;
332 }
333 
334 static int
335 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
336 {
337   struct simple_pid_list **p;
338 
339   for (p = listp; *p != NULL; p = &(*p)->next)
340     if ((*p)->pid == pid)
341       {
342 	struct simple_pid_list *next = (*p)->next;
343 
344 	*statusp = (*p)->status;
345 	xfree (*p);
346 	*p = next;
347 	return 1;
348       }
349   return 0;
350 }
351 
352 
353 /* A helper function for linux_test_for_tracefork, called after fork ().  */
354 
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the throw-away child: request tracing, stop so the parent
   can set ptrace options, then perform the fork whose event is under
   test.  _exit (not exit) avoids running the parent's stdio/atexit
   cleanup in the child.  */
static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
363 
364 /* Wrapper function for waitpid which handles EINTR.  */
365 
static int
my_waitpid (int pid, int *statusp, int flags)
{
  /* Restart waitpid as long as it is interrupted by a signal; return
     its result (pid on success, -1 with errno set otherwise) as soon
     as it completes for any other reason.  */
  for (;;)
    {
      int result = waitpid (pid, statusp, flags);

      if (result != -1 || errno != EINTR)
	return result;
    }
}
379 
380 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
381 
382    First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
383    we know that the feature is not available.  This may change the tracing
384    options for ORIGINAL_PID, but we'll be setting them shortly anyway.
385 
386    However, if it succeeds, we don't know for sure that the feature is
387    available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
388    create a child process, attach to it, use PTRACE_SETOPTIONS to enable
389    fork tracing, and let it fork.  If the process exits, we assume that we
390    can't use TRACEFORK; if we get the fork notification, and we can extract
391    the new child's PID, then we assume that we can.  */
392 
static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  /* Assume the features are absent until proven otherwise.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* First, a cheap test: a kernel without PTRACE_O_TRACEFORK rejects
     it outright.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Success is inconclusive (old kernels silently ignored unknown
     options), so run the real experiment on a throw-away child.  */
  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* Wait for the child's self-delivered SIGSTOP (see
     linux_tracefork_child).  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* Couldn't set the option on the stopped child; kill it, reap
	 it, and give up.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run until it forks (or exits).  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A stop with PTRACE_EVENT_FORK in the high bits proves the option
     took effect.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* Reap and dispose of the traced grandchild as well.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)"), ret, status);

  /* Clean up the experiment child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}
492 
493 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
494 
495    We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
496    we know that the feature is not available.  This may change the tracing
497    options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */
498 
499 static void
500 linux_test_for_tracesysgood (int original_pid)
501 {
502   int ret;
503   sigset_t prev_mask;
504 
505   /* We don't want those ptrace calls to be interrupted.  */
506   block_child_signals (&prev_mask);
507 
508   linux_supports_tracesysgood_flag = 0;
509 
510   ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
511   if (ret != 0)
512     goto out;
513 
514   linux_supports_tracesysgood_flag = 1;
515 out:
516   restore_child_signals_mask (&prev_mask);
517 }
518 
519 /* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
520    This function also sets linux_supports_tracesysgood_flag.  */
521 
522 static int
523 linux_supports_tracesysgood (int pid)
524 {
525   if (linux_supports_tracesysgood_flag == -1)
526     linux_test_for_tracesysgood (pid);
527   return linux_supports_tracesysgood_flag;
528 }
529 
530 /* Return non-zero iff we have tracefork functionality available.
531    This function also sets linux_supports_tracefork_flag.  */
532 
533 static int
534 linux_supports_tracefork (int pid)
535 {
536   if (linux_supports_tracefork_flag == -1)
537     linux_test_for_tracefork (pid);
538   return linux_supports_tracefork_flag;
539 }
540 
541 static int
542 linux_supports_tracevforkdone (int pid)
543 {
544   if (linux_supports_tracefork_flag == -1)
545     linux_test_for_tracefork (pid);
546   return linux_supports_tracevforkdone_flag;
547 }
548 
549 static void
550 linux_enable_tracesysgood (ptid_t ptid)
551 {
552   int pid = ptid_get_lwp (ptid);
553 
554   if (pid == 0)
555     pid = ptid_get_pid (ptid);
556 
557   if (linux_supports_tracesysgood (pid) == 0)
558     return;
559 
560   current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
561 
562   ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
563 }
564 
565 
566 void
567 linux_enable_event_reporting (ptid_t ptid)
568 {
569   int pid = ptid_get_lwp (ptid);
570 
571   if (pid == 0)
572     pid = ptid_get_pid (ptid);
573 
574   if (! linux_supports_tracefork (pid))
575     return;
576 
577   current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
578     | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
579 
580   if (linux_supports_tracevforkdone (pid))
581     current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
582 
583   /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
584      read-only process state.  */
585 
586   ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
587 }
588 
589 static void
590 linux_child_post_attach (int pid)
591 {
592   linux_enable_event_reporting (pid_to_ptid (pid));
593   linux_enable_tracesysgood (pid_to_ptid (pid));
594   linux_ptrace_init_warnings ();
595 }
596 
/* target_ops post-startup hook: enable extended ptrace event
   reporting on a freshly started inferior, and emit any pending
   ptrace-related warnings.  Mirrors linux_child_post_attach.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
  linux_ptrace_init_warnings ();
}
604 
605 /* Return the number of known LWPs in the tgid given by PID.  */
606 
607 static int
608 num_lwps (int pid)
609 {
610   int count = 0;
611   struct lwp_info *lp;
612 
613   for (lp = lwp_list; lp; lp = lp->next)
614     if (ptid_get_pid (lp->ptid) == pid)
615       count++;
616 
617   return count;
618 }
619 
620 /* Call delete_lwp with prototype compatible for make_cleanup.  */
621 
622 static void
623 delete_lwp_cleanup (void *lp_voidp)
624 {
625   struct lwp_info *lp = lp_voidp;
626 
627   delete_lwp (lp->ptid);
628 }
629 
630 static int
631 linux_child_follow_fork (struct target_ops *ops, int follow_child)
632 {
633   sigset_t prev_mask;
634   int has_vforked;
635   int parent_pid, child_pid;
636 
637   block_child_signals (&prev_mask);
638 
639   has_vforked = (inferior_thread ()->pending_follow.kind
640 		 == TARGET_WAITKIND_VFORKED);
641   parent_pid = ptid_get_lwp (inferior_ptid);
642   if (parent_pid == 0)
643     parent_pid = ptid_get_pid (inferior_ptid);
644   child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
645 
646   if (!detach_fork)
647     linux_enable_event_reporting (pid_to_ptid (child_pid));
648 
649   if (has_vforked
650       && !non_stop /* Non-stop always resumes both branches.  */
651       && (!target_is_async_p () || sync_execution)
652       && !(follow_child || detach_fork || sched_multi))
653     {
654       /* The parent stays blocked inside the vfork syscall until the
655 	 child execs or exits.  If we don't let the child run, then
656 	 the parent stays blocked.  If we're telling the parent to run
657 	 in the foreground, the user will not be able to ctrl-c to get
658 	 back the terminal, effectively hanging the debug session.  */
659       fprintf_filtered (gdb_stderr, _("\
660 Can not resume the parent process over vfork in the foreground while\n\
661 holding the child stopped.  Try \"set detach-on-fork\" or \
662 \"set schedule-multiple\".\n"));
663       /* FIXME output string > 80 columns.  */
664       return 1;
665     }
666 
667   if (! follow_child)
668     {
669       struct lwp_info *child_lp = NULL;
670 
671       /* We're already attached to the parent, by default.  */
672 
673       /* Detach new forked process?  */
674       if (detach_fork)
675 	{
676 	  struct cleanup *old_chain;
677 
678 	  /* Before detaching from the child, remove all breakpoints
679 	     from it.  If we forked, then this has already been taken
680 	     care of by infrun.c.  If we vforked however, any
681 	     breakpoint inserted in the parent is visible in the
682 	     child, even those added while stopped in a vfork
683 	     catchpoint.  This will remove the breakpoints from the
684 	     parent also, but they'll be reinserted below.  */
685 	  if (has_vforked)
686 	    {
687 	      /* keep breakpoints list in sync.  */
688 	      remove_breakpoints_pid (GET_PID (inferior_ptid));
689 	    }
690 
691 	  if (info_verbose || debug_linux_nat)
692 	    {
693 	      target_terminal_ours ();
694 	      fprintf_filtered (gdb_stdlog,
695 				"Detaching after fork from "
696 				"child process %d.\n",
697 				child_pid);
698 	    }
699 
700 	  old_chain = save_inferior_ptid ();
701 	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
702 
703 	  child_lp = add_lwp (inferior_ptid);
704 	  child_lp->stopped = 1;
705 	  child_lp->last_resume_kind = resume_stop;
706 	  make_cleanup (delete_lwp_cleanup, child_lp);
707 
708 	  if (linux_nat_prepare_to_resume != NULL)
709 	    linux_nat_prepare_to_resume (child_lp);
710 	  ptrace (PTRACE_DETACH, child_pid, 0, 0);
711 
712 	  do_cleanups (old_chain);
713 	}
714       else
715 	{
716 	  struct inferior *parent_inf, *child_inf;
717 	  struct cleanup *old_chain;
718 
719 	  /* Add process to GDB's tables.  */
720 	  child_inf = add_inferior (child_pid);
721 
722 	  parent_inf = current_inferior ();
723 	  child_inf->attach_flag = parent_inf->attach_flag;
724 	  copy_terminal_info (child_inf, parent_inf);
725 	  child_inf->gdbarch = parent_inf->gdbarch;
726 	  copy_inferior_target_desc_info (child_inf, parent_inf);
727 
728 	  old_chain = save_inferior_ptid ();
729 	  save_current_program_space ();
730 
731 	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
732 	  add_thread (inferior_ptid);
733 	  child_lp = add_lwp (inferior_ptid);
734 	  child_lp->stopped = 1;
735 	  child_lp->last_resume_kind = resume_stop;
736 	  child_inf->symfile_flags = SYMFILE_NO_READ;
737 
738 	  /* If this is a vfork child, then the address-space is
739 	     shared with the parent.  */
740 	  if (has_vforked)
741 	    {
742 	      child_inf->pspace = parent_inf->pspace;
743 	      child_inf->aspace = parent_inf->aspace;
744 
745 	      /* The parent will be frozen until the child is done
746 		 with the shared region.  Keep track of the
747 		 parent.  */
748 	      child_inf->vfork_parent = parent_inf;
749 	      child_inf->pending_detach = 0;
750 	      parent_inf->vfork_child = child_inf;
751 	      parent_inf->pending_detach = 0;
752 	    }
753 	  else
754 	    {
755 	      child_inf->aspace = new_address_space ();
756 	      child_inf->pspace = add_program_space (child_inf->aspace);
757 	      child_inf->removable = 1;
758 	      set_current_program_space (child_inf->pspace);
759 	      clone_program_space (child_inf->pspace, parent_inf->pspace);
760 
761 	      /* Let the shared library layer (solib-svr4) learn about
762 		 this new process, relocate the cloned exec, pull in
763 		 shared libraries, and install the solib event
764 		 breakpoint.  If a "cloned-VM" event was propagated
765 		 better throughout the core, this wouldn't be
766 		 required.  */
767 	      solib_create_inferior_hook (0);
768 	    }
769 
770 	  /* Let the thread_db layer learn about this new process.  */
771 	  check_for_thread_db ();
772 
773 	  do_cleanups (old_chain);
774 	}
775 
776       if (has_vforked)
777 	{
778 	  struct lwp_info *parent_lp;
779 	  struct inferior *parent_inf;
780 
781 	  parent_inf = current_inferior ();
782 
783 	  /* If we detached from the child, then we have to be careful
784 	     to not insert breakpoints in the parent until the child
785 	     is done with the shared memory region.  However, if we're
786 	     staying attached to the child, then we can and should
787 	     insert breakpoints, so that we can debug it.  A
788 	     subsequent child exec or exit is enough to know when does
789 	     the child stops using the parent's address space.  */
790 	  parent_inf->waiting_for_vfork_done = detach_fork;
791 	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
792 
793 	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
794 	  gdb_assert (linux_supports_tracefork_flag >= 0);
795 
796 	  if (linux_supports_tracevforkdone (0))
797 	    {
798   	      if (debug_linux_nat)
799   		fprintf_unfiltered (gdb_stdlog,
800   				    "LCFF: waiting for VFORK_DONE on %d\n",
801   				    parent_pid);
802 	      parent_lp->stopped = 1;
803 
804 	      /* We'll handle the VFORK_DONE event like any other
805 		 event, in target_wait.  */
806 	    }
807 	  else
808 	    {
809 	      /* We can't insert breakpoints until the child has
810 		 finished with the shared memory region.  We need to
811 		 wait until that happens.  Ideal would be to just
812 		 call:
813 		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
814 		 - waitpid (parent_pid, &status, __WALL);
815 		 However, most architectures can't handle a syscall
816 		 being traced on the way out if it wasn't traced on
817 		 the way in.
818 
819 		 We might also think to loop, continuing the child
820 		 until it exits or gets a SIGTRAP.  One problem is
821 		 that the child might call ptrace with PTRACE_TRACEME.
822 
823 		 There's no simple and reliable way to figure out when
824 		 the vforked child will be done with its copy of the
825 		 shared memory.  We could step it out of the syscall,
826 		 two instructions, let it go, and then single-step the
827 		 parent once.  When we have hardware single-step, this
828 		 would work; with software single-step it could still
829 		 be made to work but we'd have to be able to insert
830 		 single-step breakpoints in the child, and we'd have
831 		 to insert -just- the single-step breakpoint in the
832 		 parent.  Very awkward.
833 
834 		 In the end, the best we can do is to make sure it
835 		 runs for a little while.  Hopefully it will be out of
836 		 range of any breakpoints we reinsert.  Usually this
837 		 is only the single-step breakpoint at vfork's return
838 		 point.  */
839 
840   	      if (debug_linux_nat)
841   		fprintf_unfiltered (gdb_stdlog,
842 				    "LCFF: no VFORK_DONE "
843 				    "support, sleeping a bit\n");
844 
845 	      usleep (10000);
846 
847 	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
848 		 and leave it pending.  The next linux_nat_resume call
849 		 will notice a pending event, and bypasses actually
850 		 resuming the inferior.  */
851 	      parent_lp->status = 0;
852 	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
853 	      parent_lp->stopped = 1;
854 
855 	      /* If we're in async mode, need to tell the event loop
856 		 there's something here to process.  */
857 	      if (target_can_async_p ())
858 		async_file_mark ();
859 	    }
860 	}
861     }
862   else
863     {
864       struct inferior *parent_inf, *child_inf;
865       struct lwp_info *child_lp;
866       struct program_space *parent_pspace;
867 
868       if (info_verbose || debug_linux_nat)
869 	{
870 	  target_terminal_ours ();
871 	  if (has_vforked)
872 	    fprintf_filtered (gdb_stdlog,
873 			      _("Attaching after process %d "
874 				"vfork to child process %d.\n"),
875 			      parent_pid, child_pid);
876 	  else
877 	    fprintf_filtered (gdb_stdlog,
878 			      _("Attaching after process %d "
879 				"fork to child process %d.\n"),
880 			      parent_pid, child_pid);
881 	}
882 
883       /* Add the new inferior first, so that the target_detach below
884 	 doesn't unpush the target.  */
885 
886       child_inf = add_inferior (child_pid);
887 
888       parent_inf = current_inferior ();
889       child_inf->attach_flag = parent_inf->attach_flag;
890       copy_terminal_info (child_inf, parent_inf);
891       child_inf->gdbarch = parent_inf->gdbarch;
892       copy_inferior_target_desc_info (child_inf, parent_inf);
893 
894       parent_pspace = parent_inf->pspace;
895 
896       /* If we're vforking, we want to hold on to the parent until the
897 	 child exits or execs.  At child exec or exit time we can
898 	 remove the old breakpoints from the parent and detach or
899 	 resume debugging it.  Otherwise, detach the parent now; we'll
900 	 want to reuse it's program/address spaces, but we can't set
901 	 them to the child before removing breakpoints from the
902 	 parent, otherwise, the breakpoints module could decide to
903 	 remove breakpoints from the wrong process (since they'd be
904 	 assigned to the same address space).  */
905 
906       if (has_vforked)
907 	{
908 	  gdb_assert (child_inf->vfork_parent == NULL);
909 	  gdb_assert (parent_inf->vfork_child == NULL);
910 	  child_inf->vfork_parent = parent_inf;
911 	  child_inf->pending_detach = 0;
912 	  parent_inf->vfork_child = child_inf;
913 	  parent_inf->pending_detach = detach_fork;
914 	  parent_inf->waiting_for_vfork_done = 0;
915 	}
916       else if (detach_fork)
917 	target_detach (NULL, 0);
918 
919       /* Note that the detach above makes PARENT_INF dangling.  */
920 
921       /* Add the child thread to the appropriate lists, and switch to
922 	 this new thread, before cloning the program space, and
923 	 informing the solib layer about this new process.  */
924 
925       inferior_ptid = ptid_build (child_pid, child_pid, 0);
926       add_thread (inferior_ptid);
927       child_lp = add_lwp (inferior_ptid);
928       child_lp->stopped = 1;
929       child_lp->last_resume_kind = resume_stop;
930 
931       /* If this is a vfork child, then the address-space is shared
932 	 with the parent.  If we detached from the parent, then we can
933 	 reuse the parent's program/address spaces.  */
934       if (has_vforked || detach_fork)
935 	{
936 	  child_inf->pspace = parent_pspace;
937 	  child_inf->aspace = child_inf->pspace->aspace;
938 	}
939       else
940 	{
941 	  child_inf->aspace = new_address_space ();
942 	  child_inf->pspace = add_program_space (child_inf->aspace);
943 	  child_inf->removable = 1;
944 	  child_inf->symfile_flags = SYMFILE_NO_READ;
945 	  set_current_program_space (child_inf->pspace);
946 	  clone_program_space (child_inf->pspace, parent_pspace);
947 
948 	  /* Let the shared library layer (solib-svr4) learn about
949 	     this new process, relocate the cloned exec, pull in
950 	     shared libraries, and install the solib event breakpoint.
951 	     If a "cloned-VM" event was propagated better throughout
952 	     the core, this wouldn't be required.  */
953 	  solib_create_inferior_hook (0);
954 	}
955 
956       /* Let the thread_db layer learn about this new process.  */
957       check_for_thread_db ();
958     }
959 
960   restore_child_signals_mask (&prev_mask);
961   return 0;
962 }
963 
964 
/* target_insert_fork_catchpoint implementation.  Succeed (return 0)
   only when the kernel supports ptrace fork event reporting.  */

static int
linux_child_insert_fork_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
970 
/* target_remove_fork_catchpoint implementation.  There is no
   kernel-side state to undo here, so this always succeeds.  */

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}
976 
/* target_insert_vfork_catchpoint implementation.  Succeed (return 0)
   only when the kernel supports ptrace fork event reporting.  */

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
982 
/* target_remove_vfork_catchpoint implementation.  There is no
   kernel-side state to undo here, so this always succeeds.  */

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}
988 
/* target_insert_exec_catchpoint implementation.  Succeed (return 0)
   only when the kernel supports ptrace event reporting.  */

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
994 
/* target_remove_exec_catchpoint implementation.  There is no
   kernel-side state to undo here, so this always succeeds.  */

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}
1000 
/* target_set_syscall_catchpoint implementation.  Returns 0 on
   success, non-zero when syscall catchpoints are unsupported.  */

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  /* Without PTRACE_O_TRACESYSGOOD we cannot distinguish syscall traps
     from ordinary SIGTRAPs, so report failure.  */
  if (linux_supports_tracesysgood (pid) == 0)
    return 1;

  /* On GNU/Linux the remaining arguments are ignored.  We only ever
     enable the syscall catchpoints, never disable them, and we do not
     use the TABLE information because no syscall filtering happens
     here -- GDB core does that logic for us.  */
  return 0;
}
1015 
1016 /* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
1017    are processes sharing the same VM space.  A multi-threaded process
1018    is basically a group of such processes.  However, such a grouping
1019    is almost entirely a user-space issue; the kernel doesn't enforce
1020    such a grouping at all (this might change in the future).  In
1021    general, we'll rely on the threads library (i.e. the GNU/Linux
1022    Threads library) to provide such a grouping.
1023 
1024    It is perfectly well possible to write a multi-threaded application
1025    without the assistance of a threads library, by using the clone
1026    system call directly.  This module should be able to give some
1027    rudimentary support for debugging such applications if developers
1028    specify the CLONE_PTRACE flag in the clone system call, and are
1029    using the Linux kernel 2.4 or above.
1030 
1031    Note that there are some peculiarities in GNU/Linux that affect
1032    this code:
1033 
1034    - In general one should specify the __WCLONE flag to waitpid in
1035      order to make it report events for any of the cloned processes
1036      (and leave it out for the initial process).  However, if a cloned
1037      process has exited the exit status is only reported if the
1038      __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
1039      we cannot use it since GDB must work on older systems too.
1040 
1041    - When a traced, cloned process exits and is waited for by the
1042      debugger, the kernel reassigns it to the original parent and
1043      keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
1044      library doesn't notice this, which leads to the "zombie problem":
1045      When debugged a multi-threaded process that spawns a lot of
1046      threads will run out of processes, even if the threads exit,
1047      because the "zombies" stay around.  */
1048 
/* Head of the singly-linked list of known LWPs; new entries are
   pushed at the front (see add_initial_lwp).  */
struct lwp_info *lwp_list;


/* Original signal mask, saved so it can be reinstated later.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make that sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;
1065 
1066 /* Block child signals (SIGCHLD and linux threads signals), and store
1067    the previous mask in PREV_MASK.  */
1068 
1069 static void
1070 block_child_signals (sigset_t *prev_mask)
1071 {
1072   /* Make sure SIGCHLD is blocked.  */
1073   if (!sigismember (&blocked_mask, SIGCHLD))
1074     sigaddset (&blocked_mask, SIGCHLD);
1075 
1076   sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1077 }
1078 
/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  /* SIG_SETMASK replaces the whole mask, undoing the SIG_BLOCK done
     in block_child_signals.  */
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
1087 
/* Mask of host signals that should be passed directly to the inferior
   rather than intercepted; kept up to date by linux_nat_pass_signals.  */
static sigset_t pass_mask;
1090 
1091 /* Update signals to pass to the inferior.  */
1092 static void
1093 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1094 {
1095   int signo;
1096 
1097   sigemptyset (&pass_mask);
1098 
1099   for (signo = 1; signo < NSIG; signo++)
1100     {
1101       int target_signo = gdb_signal_from_host (signo);
1102       if (target_signo < numsigs && pass_signals[target_signo])
1103         sigaddset (&pass_mask, signo);
1104     }
1105 }
1106 
1107 
1108 
1109 /* Prototypes for local functions.  */
1110 static int stop_wait_callback (struct lwp_info *lp, void *data);
1111 static int linux_thread_alive (ptid_t ptid);
1112 static char *linux_child_pid_to_exec_file (int pid);
1113 
1114 
1115 /* Convert wait status STATUS to a string.  Used for printing debug
1116    messages only.  */
1117 
1118 static char *
1119 status_to_str (int status)
1120 {
1121   static char buf[64];
1122 
1123   if (WIFSTOPPED (status))
1124     {
1125       if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1126 	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1127 		  strsignal (SIGTRAP));
1128       else
1129 	snprintf (buf, sizeof (buf), "%s (stopped)",
1130 		  strsignal (WSTOPSIG (status)));
1131     }
1132   else if (WIFSIGNALED (status))
1133     snprintf (buf, sizeof (buf), "%s (terminated)",
1134 	      strsignal (WTERMSIG (status)));
1135   else
1136     snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1137 
1138   return buf;
1139 }
1140 
/* Destroy and free LP, including its architecture-specific private
   data.  The caller is responsible for unlinking LP from the LWP list
   first (see purge_lwp_list / delete_lwp).  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}
1149 
1150 /* Remove all LWPs belong to PID from the lwp list.  */
1151 
1152 static void
1153 purge_lwp_list (int pid)
1154 {
1155   struct lwp_info *lp, *lpprev, *lpnext;
1156 
1157   lpprev = NULL;
1158 
1159   for (lp = lwp_list; lp; lp = lpnext)
1160     {
1161       lpnext = lp->next;
1162 
1163       if (ptid_get_pid (lp->ptid) == pid)
1164 	{
1165 	  if (lp == lwp_list)
1166 	    lwp_list = lp->next;
1167 	  else
1168 	    lpprev->next = lp->next;
1169 
1170 	  lwp_free (lp);
1171 	}
1172       else
1173 	lpprev = lp;
1174     }
1175 }
1176 
1177 /* Add the LWP specified by PTID to the list.  PTID is the first LWP
1178    in the process.  Return a pointer to the structure describing the
1179    new LWP.
1180 
1181    This differs from add_lwp in that we don't let the arch specific
1182    bits know about this new thread.  Current clients of this callback
1183    take the opportunity to install watchpoints in the new thread, and
1184    we shouldn't do that for the first thread.  If we're spawning a
1185    child ("run"), the thread executes the shell wrapper first, and we
1186    shouldn't touch it until it execs the program we want to debug.
1187    For "attach", it'd be okay to call the callback, but it's not
1188    necessary, because watchpoints can't yet have been inserted into
1189    the inferior.  */
1190 
1191 static struct lwp_info *
1192 add_initial_lwp (ptid_t ptid)
1193 {
1194   struct lwp_info *lp;
1195 
1196   gdb_assert (is_lwp (ptid));
1197 
1198   lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1199 
1200   memset (lp, 0, sizeof (struct lwp_info));
1201 
1202   lp->last_resume_kind = resume_continue;
1203   lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1204 
1205   lp->ptid = ptid;
1206   lp->core = -1;
1207 
1208   lp->next = lwp_list;
1209   lwp_list = lp;
1210 
1211   return lp;
1212 }
1213 
1214 /* Add the LWP specified by PID to the list.  Return a pointer to the
1215    structure describing the new LWP.  The LWP should already be
1216    stopped.  */
1217 
1218 static struct lwp_info *
1219 add_lwp (ptid_t ptid)
1220 {
1221   struct lwp_info *lp;
1222 
1223   lp = add_initial_lwp (ptid);
1224 
1225   /* Let the arch specific bits know about this new thread.  Current
1226      clients of this callback take the opportunity to install
1227      watchpoints in the new thread.  We don't do this for the first
1228      thread though.  See add_initial_lwp.  */
1229   if (linux_nat_new_thread != NULL)
1230     linux_nat_new_thread (lp);
1231 
1232   return lp;
1233 }
1234 
1235 /* Remove the LWP specified by PID from the list.  */
1236 
1237 static void
1238 delete_lwp (ptid_t ptid)
1239 {
1240   struct lwp_info *lp, *lpprev;
1241 
1242   lpprev = NULL;
1243 
1244   for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1245     if (ptid_equal (lp->ptid, ptid))
1246       break;
1247 
1248   if (!lp)
1249     return;
1250 
1251   if (lpprev)
1252     lpprev->next = lp->next;
1253   else
1254     lwp_list = lp->next;
1255 
1256   lwp_free (lp);
1257 }
1258 
1259 /* Return a pointer to the structure describing the LWP corresponding
1260    to PID.  If no corresponding LWP could be found, return NULL.  */
1261 
1262 static struct lwp_info *
1263 find_lwp_pid (ptid_t ptid)
1264 {
1265   struct lwp_info *lp;
1266   int lwp;
1267 
1268   if (is_lwp (ptid))
1269     lwp = GET_LWP (ptid);
1270   else
1271     lwp = GET_PID (ptid);
1272 
1273   for (lp = lwp_list; lp; lp = lp->next)
1274     if (lwp == GET_LWP (lp->ptid))
1275       return lp;
1276 
1277   return NULL;
1278 }
1279 
1280 /* Call CALLBACK with its second argument set to DATA for every LWP in
1281    the list.  If CALLBACK returns 1 for a particular LWP, return a
1282    pointer to the structure describing that LWP immediately.
1283    Otherwise return NULL.  */
1284 
1285 struct lwp_info *
1286 iterate_over_lwps (ptid_t filter,
1287 		   int (*callback) (struct lwp_info *, void *),
1288 		   void *data)
1289 {
1290   struct lwp_info *lp, *lpnext;
1291 
1292   for (lp = lwp_list; lp; lp = lpnext)
1293     {
1294       lpnext = lp->next;
1295 
1296       if (ptid_match (lp->ptid, filter))
1297 	{
1298 	  if ((*callback) (lp, data))
1299 	    return lp;
1300 	}
1301     }
1302 
1303   return NULL;
1304 }
1305 
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Discard every LWP of the current (single-threaded) inferior...  */
  purge_lwp_list (GET_PID (inferior_ptid));

  /* ... and record the checkpoint's thread as the sole, stopped LWP.  */
  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
1331 
1332 /* Handle the exit of a single thread LP.  */
1333 
1334 static void
1335 exit_lwp (struct lwp_info *lp)
1336 {
1337   struct thread_info *th = find_thread_ptid (lp->ptid);
1338 
1339   if (th)
1340     {
1341       if (print_thread_events)
1342 	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1343 
1344       delete_thread (lp->ptid);
1345     }
1346 
1347   delete_lwp (lp->ptid);
1348 }
1349 
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  FIRST is non-zero
   for the first LWP of a process.  On return *CLONED is set if the
   LWP's stop was only reported with __WCLONE, and *SIGNALLED is set
   if it stopped with a signal other than the expected SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* The LWP stopped with some other signal first; record that so
	 the caller can redeliver it later.  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
1422 
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existance, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  /* Keep SIGCHLD from interrupting the ptrace/waitpid dance below.  */
  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork_flag)
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  restore_child_signals_mask (&prev_mask);
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);

		      restore_child_signals_mask (&prev_mask);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  restore_child_signals_mask (&prev_mask);
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	{
	  /* The LWP exited before we could stop it; there is nothing
	     left to attach to.  */
	  restore_child_signals_mask (&prev_mask);
	  return 1;
	}

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* It stopped with a signal other than our SIGSTOP; keep the
	     status so that signal can be reported/redelivered.  */
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}
1551 
/* Start a new inferior running EXEC_FILE with arguments ALLARGS and
   environment ENV, optionally disabling address space randomization
   for the child around the underlying create_inferior call.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* Temporarily set ADDR_NO_RANDOMIZE in this process's
	 personality so the exec'd child inherits it; the original
	 personality is restored after the create call below.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Put GDB's own personality back the way it was.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}
1597 
/* Attach to the process specified by ARGS.  On success, decorates the
   main thread with LWP info and caches its initial stop status to be
   reported later.  Errors out if the target cannot be attached or
   dies before it can be stopped.  */

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      /* The attach failed; prepend any Linux-specific attach warnings
	 to the original error message and re-throw.  */
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_warnings (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      throw_error (ex.error, "%s%s", buffer_s, message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died before we managed to stop it; report how.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1689 
/* Compute the signal, if any, that should be redelivered to LP when
   it is resumed or detached, storing a corresponding wait status in
   *STATUS (0 when there is no signal to pass).  Always returns 0.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      /* Non-stop mode: each stopped thread remembers its own last
	 stop signal.  */
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      /* All-stop mode: only the thread that reported the last event
	 has a meaningful stop signal.  */
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}
1773 
/* iterate_over_lwps callback: detach from LP, redelivering any
   pending signal, unless LP is the main (whole-process) LWP, which is
   left for the caller.  Always returns 0 so the iteration visits
   every LWP.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC:  Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1824 
/* Target to_detach implementation.  Detach from the inferior and all
   of its LWPs.  ARGS may carry a signal number to deliver on detach;
   if it is NULL or empty and the main LWP has a pending stop signal,
   that signal is forwarded instead so it isn't lost.  */

static void
linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = GET_PID (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  8 bytes is enough for any
	 signal number rendered in decimal.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1882 
1883 /* Resume LP.  */
1884 
1885 static void
1886 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1887 {
1888   if (lp->stopped)
1889     {
1890       struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1891 
1892       if (inf->vfork_child != NULL)
1893 	{
1894 	  if (debug_linux_nat)
1895 	    fprintf_unfiltered (gdb_stdlog,
1896 				"RC: Not resuming %s (vfork parent)\n",
1897 				target_pid_to_str (lp->ptid));
1898 	}
1899       else if (lp->status == 0
1900 	       && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1901 	{
1902 	  if (debug_linux_nat)
1903 	    fprintf_unfiltered (gdb_stdlog,
1904 				"RC: Resuming sibling %s, %s, %s\n",
1905 				target_pid_to_str (lp->ptid),
1906 				(signo != GDB_SIGNAL_0
1907 				 ? strsignal (gdb_signal_to_host (signo))
1908 				 : "0"),
1909 				step ? "step" : "resume");
1910 
1911 	  if (linux_nat_prepare_to_resume != NULL)
1912 	    linux_nat_prepare_to_resume (lp);
1913 	  linux_ops->to_resume (linux_ops,
1914 				pid_to_ptid (GET_LWP (lp->ptid)),
1915 				step, signo);
1916 	  lp->stopped = 0;
1917 	  lp->step = step;
1918 	  lp->stopped_by_watchpoint = 0;
1919 	}
1920       else
1921 	{
1922 	  if (debug_linux_nat)
1923 	    fprintf_unfiltered (gdb_stdlog,
1924 				"RC: Not resuming sibling %s (has pending)\n",
1925 				target_pid_to_str (lp->ptid));
1926 	}
1927     }
1928   else
1929     {
1930       if (debug_linux_nat)
1931 	fprintf_unfiltered (gdb_stdlog,
1932 			    "RC: Not resuming sibling %s (not stopped)\n",
1933 			    target_pid_to_str (lp->ptid));
1934     }
1935 }
1936 
1937 /* Resume LWP, with the last stop signal, if it is in pass state.  */
1938 
1939 static int
1940 linux_nat_resume_callback (struct lwp_info *lp, void *data)
1941 {
1942   enum gdb_signal signo = GDB_SIGNAL_0;
1943 
1944   if (lp->stopped)
1945     {
1946       struct thread_info *thread;
1947 
1948       thread = find_thread_ptid (lp->ptid);
1949       if (thread != NULL)
1950 	{
1951 	  if (signal_pass_state (thread->suspend.stop_signal))
1952 	    signo = thread->suspend.stop_signal;
1953 	  thread->suspend.stop_signal = GDB_SIGNAL_0;
1954 	}
1955     }
1956 
1957   resume_lwp (lp, 0, signo);
1958   return 0;
1959 }
1960 
/* iterate_over_lwps callback: mark LP as not resumed, recording that
   the last resume request for it was a stop.  Always returns 0 so
   iteration continues over all LWPs.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}
1968 
/* iterate_over_lwps callback: mark LP as resumed, recording that the
   last resume request for it was a continue.  Always returns 0 so
   iteration continues over all LWPs.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}
1976 
/* Target to_resume implementation.  Resume the LWPs matching PTID
   (all LWPs of the process when PTID is a bare pid or minus_one_ptid),
   single-stepping the event thread if STEP, and delivering SIGNO to
   it.  If the event thread already has a pending status, short-circuit
   without resuming anything and let linux_nat_wait report it.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  sigset_t prev_mask;
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* Keep SIGCHLD from interrupting the bookkeeping below.  */
  block_child_signals (&prev_mask);

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      restore_child_signals_mask (&prev_mask);
      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     linux_nat_resume_callback.  */
  lp->stopped = 0;

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, ptid, step, signo);
  lp->stopped_by_watchpoint = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  restore_child_signals_mask (&prev_mask);
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
2090 
/* Send signal SIGNO to LWP LWPID.  Returns the syscall's result
   (0 on success, -1 with errno set on failure).  */

static int
kill_lwp (int lwpid, int signo)
{
#ifdef HAVE_TKILL_SYSCALL
  /* Prefer tkill so the signal is delivered to the specific thread
     when the inferior uses NPTL threads.  Once tkill reports ENOSYS,
     remember that and stop trying.  */
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;
      tkill_unavailable = 1;
    }
#endif

  /* Fall back to plain kill on kernels without tkill.  */
  return kill (lwpid, signo);
}
2118 
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.

   Returns non-zero if the event was consumed here (the LWP was
   re-resumed and the caller should keep waiting), zero if the event
   should be reported to the core (LP->waitstatus is then filled in).
   STOPPING means we are in the process of collecting SIGSTOPs from
   all threads; in that case the event is always swallowed.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event is generated.  If we didn't do this (and returned
	 0), we'd leave a syscall entry pending, and our caller, by
	 using PTRACE_CONT to collect the SIGSTOP, skips the syscall
	 return itself.  Later, when the user re-resumes this LWP,
	 we'd see another syscall entry event and we'd mistake it for
	 a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    GET_LWP (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				GET_LWP (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    GET_LWP (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    GET_LWP (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  /* Note that gdbarch_get_syscall_number may access registers, hence
     fill a regcache.  */
  registers_changed ();
  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			lp->step, GDB_SIGNAL_0);
  return 1;
}
2249 
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.
   STATUS is the raw waitpid status whose high 16 bits encode the
   PTRACE_EVENT_* number.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* The new child's ID is the ptrace event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  if (linux_nat_new_fork != NULL)
	    linux_nat_new_fork (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_build (new_pid, new_pid, 0));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
	  linux_enable_event_reporting (pid_to_ptid (new_pid));

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* PTRACE_EVENT_CLONE: a new thread.  Track it ourselves
	     instead of reporting the event to the core.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %d, new child is LWP %ld\n",
				pid, new_pid);

	  new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    {
	      struct thread_info *tp;

	      /* When we stop for an event in some other thread, and
		 pull the thread list just as this thread has cloned,
		 we'll have seen the new thread in the thread_db list
		 before handling the CLONE event (glibc's
		 pthread_create adds the new thread to the thread list
		 before clone'ing, and has the kernel fill in the
		 thread's tid on the clone call with
		 CLONE_PARENT_SETTID).  If that happened, and the core
		 had requested the new thread to stop, we'll have
		 killed it with SIGSTOP.  But since SIGSTOP is not an
		 RT signal, it can only be queued once.  We need to be
		 careful to not resume the LWP if we wanted it to
		 stop.  In that case, we'll leave the SIGSTOP pending.
		 It will later be reported as GDB_SIGNAL_0.  */
	      tp = find_thread_ptid (new_lp->ptid);
	      if (tp != NULL && tp->stop_requested)
		new_lp->last_resume_kind = resume_stop;
	      else
		status = 0;
	    }

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (GET_LWP (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		  /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
		     resume_stop.  */
		  new_lp->last_resume_kind = resume_continue;
		}
	    }

	  if (status != 0)
	    {
	      /* We created NEW_LP so it cannot yet contain STATUS.  */
	      gdb_assert (new_lp->status == 0);

	      /* Save the wait status to report later.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LHEW: waitpid of new LWP %ld, "
				    "saving status %s\n",
				    (long) GET_LWP (new_lp->ptid),
				    status_to_str (status));
	      new_lp->status = status;
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      new_lp->resumed = 1;

	      if (status == 0)
		{
		  gdb_assert (new_lp->last_resume_kind == resume_continue);
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: resuming new LWP %ld\n",
					GET_LWP (new_lp->ptid));
		  if (linux_nat_prepare_to_resume != NULL)
		    linux_nat_prepare_to_resume (new_lp);
		  linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
					0, GDB_SIGNAL_0);
		  new_lp->stopped = 0;
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: resuming parent LWP %d\n", pid);
	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
				0, GDB_SIGNAL_0);

	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    GET_LWP (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				GET_LWP (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    GET_LWP (lp->ptid));
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
2503 
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  LP must be running with no pending status on entry
   (asserted below).  Syscall traps and extended ptrace events are
   consumed here (see linux_handle_syscall_trap and
   linux_handle_extended_wait) and waiting recurses until a
   reportable stop arrives.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
	 was right and we should just call sigsuspend.  */

      /* Try the non-clone flavor first; on ECHILD retry with
	 __WCLONE, since we don't know which kind of LWP this is.  */
      pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 __WCLONE is not applicable for the leader so we can't use that.
	 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
	 process; it gets ESRCH both for the zombie and for running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
	  && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"WL: Thread group leader %s vanished.\n",
				target_pid_to_str (lp->ptid));
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */

      sigsuspend (&suspend_mask);
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}
2637 
2638 /* Send a SIGSTOP to LP.  */
2639 
2640 static int
2641 stop_callback (struct lwp_info *lp, void *data)
2642 {
2643   if (!lp->stopped && !lp->signalled)
2644     {
2645       int ret;
2646 
2647       if (debug_linux_nat)
2648 	{
2649 	  fprintf_unfiltered (gdb_stdlog,
2650 			      "SC:  kill %s **<SIGSTOP>**\n",
2651 			      target_pid_to_str (lp->ptid));
2652 	}
2653       errno = 0;
2654       ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2655       if (debug_linux_nat)
2656 	{
2657 	  fprintf_unfiltered (gdb_stdlog,
2658 			      "SC:  lwp kill %d %s\n",
2659 			      ret,
2660 			      errno ? safe_strerror (errno) : "ERRNO-OK");
2661 	}
2662 
2663       lp->signalled = 1;
2664       gdb_assert (lp->status == 0);
2665     }
2666 
2667   return 0;
2668 }
2669 
/* Request a stop on LWP.  Exported entry point; sends LWP a SIGSTOP
   via stop_callback, which is a no-op if LWP is already stopped or
   already has a SIGSTOP in flight.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp, NULL);
}
2677 
2678 /* Return non-zero if LWP PID has a pending SIGINT.  */
2679 
2680 static int
2681 linux_nat_has_pending_sigint (int pid)
2682 {
2683   sigset_t pending, blocked, ignored;
2684 
2685   linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2686 
2687   if (sigismember (&pending, SIGINT)
2688       && !sigismember (&ignored, SIGINT))
2689     return 1;
2690 
2691   return 0;
2692 }
2693 
2694 /* Set a flag in LP indicating that we should ignore its next SIGINT.  */
2695 
2696 static int
2697 set_ignore_sigint (struct lwp_info *lp, void *data)
2698 {
2699   /* If a thread has a pending SIGINT, consume it; otherwise, set a
2700      flag to consume the next one.  */
2701   if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2702       && WSTOPSIG (lp->status) == SIGINT)
2703     lp->status = 0;
2704   else
2705     lp->ignore_sigint = 1;
2706 
2707   return 0;
2708 }
2709 
2710 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2711    This function is called after we know the LWP has stopped; if the LWP
2712    stopped before the expected SIGINT was delivered, then it will never have
2713    arrived.  Also, if the signal was delivered to a shared queue and consumed
2714    by a different thread, it will never be delivered to this LWP.  */
2715 
2716 static void
2717 maybe_clear_ignore_sigint (struct lwp_info *lp)
2718 {
2719   if (!lp->ignore_sigint)
2720     return;
2721 
2722   if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2723     {
2724       if (debug_linux_nat)
2725 	fprintf_unfiltered (gdb_stdlog,
2726 			    "MCIS: Clearing bogus flag for %s\n",
2727 			    target_pid_to_str (lp->ptid));
2728       lp->ignore_sigint = 0;
2729     }
2730 }
2731 
/* Fetch the possible triggered data watchpoint info and store it in
   LP.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static void
save_sigtrap (struct lwp_info *lp)
{
  struct cleanup *old_chain;

  /* No watchpoint support in the low target; nothing to cache.  */
  if (linux_ops->to_stopped_by_watchpoint == NULL)
    {
      lp->stopped_by_watchpoint = 0;
      return;
    }

  /* The low target's methods operate on inferior_ptid; temporarily
     switch it to LP, restoring it via the cleanup chain.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = lp->ptid;

  lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();

  if (lp->stopped_by_watchpoint)
    {
      if (linux_ops->to_stopped_data_address != NULL)
	lp->stopped_data_address_p =
	  linux_ops->to_stopped_data_address (&current_target,
					      &lp->stopped_data_address);
      else
	lp->stopped_data_address_p = 0;
    }

  do_cleanups (old_chain);
}
2775 
2776 /* See save_sigtrap.  */
2777 
2778 static int
2779 linux_nat_stopped_by_watchpoint (void)
2780 {
2781   struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2782 
2783   gdb_assert (lp != NULL);
2784 
2785   return lp->stopped_by_watchpoint;
2786 }
2787 
/* Target to_stopped_data_address implementation.  Store in *ADDR_P
   the data address cached by save_sigtrap for the current LWP, and
   return non-zero if that cached address is valid.  */

static int
linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  *addr_p = lp->stopped_data_address;

  return lp->stopped_data_address_p;
}
2799 
2800 /* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */
2801 
static int
sigtrap_is_event (int status)
{
  /* Only a ptrace-stop can carry a SIGTRAP.  */
  if (!WIFSTOPPED (status))
    return 0;

  return WSTOPSIG (status) == SIGTRAP;
}
2807 
/* SIGTRAP-like events recognizer.  Defaults to plain SIGTRAP
   detection (sigtrap_is_event); an arch may install its own
   recognizer via linux_nat_set_status_is_event.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2811 
2812 /* Check for SIGTRAP-like events in LP.  */
2813 
2814 static int
2815 linux_nat_lp_status_is_event (struct lwp_info *lp)
2816 {
2817   /* We check for lp->waitstatus in addition to lp->status, because we can
2818      have pending process exits recorded in lp->status
2819      and W_EXITCODE(0,0) == 0.  We should probably have an additional
2820      lp->status_p flag.  */
2821 
2822   return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2823 	  && linux_nat_status_is_event (lp->status));
2824 }
2825 
/* Set an alternative SIGTRAP-like events recognizer.  If the stop PC
   turns out to have a breakpoint inserted (breakpoint_inserted_here_p),
   then gdbarch_decr_pc_after_break will be applied when the hit is
   cancelled (see cancel_breakpoint).  */

void
linux_nat_set_status_is_event (struct target_ops *t,
			       int (*status_is_event) (int status))
{
  linux_nat_status_is_event = status_is_event;
}
2836 
2837 /* Wait until LP is stopped.  */
2838 
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      /* NOTE(review): a zero status from wait_lwp appears to mean
	 the LWP is gone; there is nothing left to wait for then.  */
      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* A SIGINT we already reported for another thread: pass it
	     straight through with PTRACE_CONT and recurse to wait for
	     the SIGSTOP we are really after.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) "
				"(discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.
	     Cache watchpoint-hit state now, while the debug registers
	     still reflect this stop, and leave the event pending in
	     lp->status for later reporting.  */

	  save_sigtrap (lp);

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Pending event %s in %s\n",
				status_to_str ((int) status),
				target_pid_to_str (lp->ptid));

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (!lp->stopped);
	  gdb_assert (lp->signalled);
	  lp->stopped = 1;
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"SWC: Delayed SIGSTOP caught for %s.\n",
				target_pid_to_str (lp->ptid));

	  lp->stopped = 1;

	  /* Reset SIGNALLED only after the stop_wait_callback call
	     above as it does gdb_assert on SIGNALLED.  */
	  lp->signalled = 0;
	}
    }

  return 0;
}
2914 
2915 /* Return non-zero if LP has a wait status pending.  */
2916 
2917 static int
2918 status_callback (struct lwp_info *lp, void *data)
2919 {
2920   /* Only report a pending wait status if we pretend that this has
2921      indeed been resumed.  */
2922   if (!lp->resumed)
2923     return 0;
2924 
2925   if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2926     {
2927       /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2928 	 or a pending process exit.  Note that `W_EXITCODE(0,0) ==
2929 	 0', so a clean process exit can not be stored pending in
2930 	 lp->status, it is indistinguishable from
2931 	 no-pending-status.  */
2932       return 1;
2933     }
2934 
2935   if (lp->status != 0)
2936     return 1;
2937 
2938   return 0;
2939 }
2940 
2941 /* Return non-zero if LP isn't stopped.  */
2942 
2943 static int
2944 running_callback (struct lwp_info *lp, void *data)
2945 {
2946   return (!lp->stopped
2947 	  || ((lp->status != 0
2948 	       || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2949 	      && lp->resumed));
2950 }
2951 
2952 /* Count the LWP's that have had events.  */
2953 
2954 static int
2955 count_events_callback (struct lwp_info *lp, void *data)
2956 {
2957   int *count = data;
2958 
2959   gdb_assert (count != NULL);
2960 
2961   /* Count only resumed LWPs that have a SIGTRAP event pending.  */
2962   if (lp->resumed && linux_nat_lp_status_is_event (lp))
2963     (*count)++;
2964 
2965   return 0;
2966 }
2967 
2968 /* Select the LWP (if any) that is currently being single-stepped.  */
2969 
2970 static int
2971 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2972 {
2973   if (lp->last_resume_kind == resume_step
2974       && lp->status != 0)
2975     return 1;
2976   else
2977     return 0;
2978 }
2979 
2980 /* Select the Nth LWP that has had a SIGTRAP event.  */
2981 
2982 static int
2983 select_event_lwp_callback (struct lwp_info *lp, void *data)
2984 {
2985   int *selector = data;
2986 
2987   gdb_assert (selector != NULL);
2988 
2989   /* Select only resumed LWPs that have a SIGTRAP event pending.  */
2990   if (lp->resumed && linux_nat_lp_status_is_event (lp))
2991     if ((*selector)-- == 0)
2992       return 1;
2993 
2994   return 0;
2995 }
2996 
static int
cancel_breakpoint (struct lwp_info *lp)
{
  /* Arrange for a breakpoint to be hit again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     this LWP, and this breakpoint will trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.

     Returns non-zero if the hit was cancelled (PC was sitting on an
     inserted breakpoint), zero otherwise.  */

  struct regcache *regcache = get_thread_regcache (lp->ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR pc;

  /* On archs where the reported PC is past the breakpoint
     instruction (gdbarch_decr_pc_after_break != 0), compute the
     breakpoint's own address.  */
  pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "CB: Push back breakpoint for %s\n",
			    target_pid_to_str (lp->ptid));

      /* Back up the PC if necessary.  */
      if (gdbarch_decr_pc_after_break (gdbarch))
	regcache_write_pc (regcache, pc);

      return 1;
    }
  return 0;
}
3029 
3030 static int
3031 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3032 {
3033   struct lwp_info *event_lp = data;
3034 
3035   /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
3036   if (lp == event_lp)
3037     return 0;
3038 
3039   /* If a LWP other than the LWP that we're reporting an event for has
3040      hit a GDB breakpoint (as opposed to some random trap signal),
3041      then just arrange for it to hit it again later.  We don't keep
3042      the SIGTRAP status and don't forward the SIGTRAP signal to the
3043      LWP.  We will handle the current event, eventually we will resume
3044      all LWPs, and this one will get its breakpoint trap again.
3045 
3046      If we do not do this, then we run the risk that the user will
3047      delete or disable the breakpoint, but the LWP will have already
3048      tripped on it.  */
3049 
3050   if (linux_nat_lp_status_is_event (lp)
3051       && cancel_breakpoint (lp))
3052     /* Throw away the SIGTRAP.  */
3053     lp->status = 0;
3054 
3055   return 0;
3056 }
3057 
3058 /* Select one LWP out of those that have events pending.  */
3059 
static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (filter,
				select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Select single-step %s\n",
			    target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  (Random choice presumably
         keeps one LWP from monopolizing event reporting — TODO
         confirm rationale.)  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (filter, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.
	 The expression maps rand() uniformly onto 0..num_events-1.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d SIGTRAP events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (filter,
				    select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  Note *ORIG_LP may have
     been switched just above, so this clears the chosen LWP's pending
     status — the caller now owns it via *STATUS.  */
  (*orig_lp)->status = 0;
}
3112 
3113 /* Return non-zero if LP has been resumed.  */
3114 
3115 static int
3116 resumed_callback (struct lwp_info *lp, void *data)
3117 {
3118   return lp->resumed;
3119 }
3120 
3121 /* Stop an active thread, verify it still exists, then resume it.  If
3122    the thread ends up with a pending status, then it is not resumed,
3123    and *DATA (really a pointer to int), is set.  */
3124 
static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  int *new_pending_p = data;

  if (!lp->stopped)
    {
      /* Remember the ptid by value; LP itself may be deleted while
	 we wait for it to stop (e.g. if the LWP exits), so we must
	 re-look it up afterwards.  */
      ptid_t ptid = lp->ptid;

      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);

      /* Resume if the lwp still exists, and the core wanted it
	 running.  */
      lp = find_lwp_pid (ptid);
      if (lp != NULL)
	{
	  if (lp->last_resume_kind == resume_stop
	      && lp->status == 0)
	    {
	      /* The core wanted the LWP to stop.  Even if it stopped
		 cleanly (with SIGSTOP), leave the event pending.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: core wanted LWP %ld stopped "
				    "(leaving SIGSTOP pending)\n",
				    GET_LWP (lp->ptid));
	      lp->status = W_STOPCODE (SIGSTOP);
	    }

	  if (lp->status == 0)
	    {
	      /* Nothing pending; put the LWP back the way the core
		 asked for it (stepping or continuing).  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: re-resuming LWP %ld\n",
				    GET_LWP (lp->ptid));
	      resume_lwp (lp, lp->step, GDB_SIGNAL_0);
	    }
	  else
	    {
	      /* The stop left an event pending; flag it for the
		 caller so it can go back and re-check all LWPs.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: not re-resuming LWP %ld "
				    "(has pending)\n",
				    GET_LWP (lp->ptid));
	      if (new_pending_p)
		*new_pending_p = 1;
	    }
	}
    }
  return 0;
}
3177 
3178 /* Check if we should go on and pass this event to common code.
3179    Return the affected lwp if we are, or NULL otherwise.  If we stop
3180    all lwps temporarily, we may end up with new pending events in some
3181    other lwp.  In that case set *NEW_PENDING_P to true.  */
3182 
static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
{
  struct lwp_info *lp;

  *new_pending_p = 0;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Re-adding thread group leader LWP %d.\n",
			    lwpid);

      lp = add_lwp (BUILD_LWP (lwpid, lwpid));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* A stop for an LWP not (yet) in our list: stash the event so
	 it can be matched up when the corresponding fork/vfork/clone
	 event arrives.  */
      add_to_pid_list (&stopped_pids, lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      /* Returns non-zero when the extended event was fully consumed
	 (no event to report to the core).  */
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  /* Cache watchpoint-hit state now, while the debug registers still
     reflect this stop (see save_sigtrap).  */
  if (linux_nat_status_is_event (status))
    save_sigtrap (lp);

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status))
      && num_lwps (GET_PID (lp->ptid)) > 1)
    {
      /* If this is the main thread, we must stop all threads and verify
	 if they are still alive.  This is because in the nptl thread model
	 on Linux 2.4, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
	{
	  lp->stopped = 1;
	  iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
			     stop_and_resume_callback, new_pending_p);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      if (num_lwps (GET_PID (lp->ptid)) > 1)
       {
	 /* If there is at least one more LWP, then the exit signal
	    was not the end of the debugged application and should be
	    ignored.  */
	 exit_lwp (lp);
	 return NULL;
       }
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      lp->signalled = 0;

      if (lp->last_resume_kind != resume_stop)
	{
	  /* This is a delayed SIGSTOP.  The core didn't ask for this
	     stop, so swallow it and keep the LWP going.  */

	  registers_changed ();

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, GDB_SIGNAL_0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid));

	  lp->stopped = 0;
	  gdb_assert (lp->resumed);

	  /* Discard the event.  */
	  return NULL;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      registers_changed ();
      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, GDB_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  Record it in LP and hand LP back to the
     caller for reporting.  */
  gdb_assert (lp);
  lp->status = status;
  return lp;
}
3391 
3392 /* Detect zombie thread group leaders, and "exit" them.  We can't reap
3393    their exits until all other threads in the group have exited.  */
3394 
static void
check_zombie_leaders (void)
{
  struct inferior *inf;

  ALL_INFERIORS (inf)
    {
      struct lwp_info *leader_lp;

      /* Skip inferiors with no process (pid 0) — presumably not yet
	 started or already gone.  */
      if (inf->pid == 0)
	continue;

      leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && num_lwps (inf->pid) > 1
	  && linux_proc_pid_is_zombie (inf->pid))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"CZL: Thread group leader %d zombie "
				"(it exited, or another thread execd).\n",
				inf->pid);

	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"CZL: Thread group leader %d vanished.\n",
				inf->pid);
	  exit_lwp (leader_lp);
	}
    }
}
3456 
3457 static ptid_t
3458 linux_nat_wait_1 (struct target_ops *ops,
3459 		  ptid_t ptid, struct target_waitstatus *ourstatus,
3460 		  int target_options)
3461 {
3462   static sigset_t prev_mask;
3463   enum resume_kind last_resume_kind;
3464   struct lwp_info *lp;
3465   int status;
3466 
3467   if (debug_linux_nat)
3468     fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3469 
3470   /* The first time we get here after starting a new inferior, we may
3471      not have added it to the LWP list yet - this is the earliest
3472      moment at which we know its PID.  */
3473   if (ptid_is_pid (inferior_ptid))
3474     {
3475       /* Upgrade the main thread's ptid.  */
3476       thread_change_ptid (inferior_ptid,
3477 			  BUILD_LWP (GET_PID (inferior_ptid),
3478 				     GET_PID (inferior_ptid)));
3479 
3480       lp = add_initial_lwp (inferior_ptid);
3481       lp->resumed = 1;
3482     }
3483 
3484   /* Make sure SIGCHLD is blocked.  */
3485   block_child_signals (&prev_mask);
3486 
3487 retry:
3488   lp = NULL;
3489   status = 0;
3490 
3491   /* First check if there is a LWP with a wait status pending.  */
3492   if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3493     {
3494       /* Any LWP in the PTID group that's been resumed will do.  */
3495       lp = iterate_over_lwps (ptid, status_callback, NULL);
3496       if (lp)
3497 	{
3498 	  if (debug_linux_nat && lp->status)
3499 	    fprintf_unfiltered (gdb_stdlog,
3500 				"LLW: Using pending wait status %s for %s.\n",
3501 				status_to_str (lp->status),
3502 				target_pid_to_str (lp->ptid));
3503 	}
3504     }
3505   else if (is_lwp (ptid))
3506     {
3507       if (debug_linux_nat)
3508 	fprintf_unfiltered (gdb_stdlog,
3509 			    "LLW: Waiting for specific LWP %s.\n",
3510 			    target_pid_to_str (ptid));
3511 
3512       /* We have a specific LWP to check.  */
3513       lp = find_lwp_pid (ptid);
3514       gdb_assert (lp);
3515 
3516       if (debug_linux_nat && lp->status)
3517 	fprintf_unfiltered (gdb_stdlog,
3518 			    "LLW: Using pending wait status %s for %s.\n",
3519 			    status_to_str (lp->status),
3520 			    target_pid_to_str (lp->ptid));
3521 
3522       /* We check for lp->waitstatus in addition to lp->status,
3523 	 because we can have pending process exits recorded in
3524 	 lp->status and W_EXITCODE(0,0) == 0.  We should probably have
3525 	 an additional lp->status_p flag.  */
3526       if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3527 	lp = NULL;
3528     }
3529 
3530   if (!target_can_async_p ())
3531     {
3532       /* Causes SIGINT to be passed on to the attached process.  */
3533       set_sigint_trap ();
3534     }
3535 
3536   /* But if we don't find a pending event, we'll have to wait.  */
3537 
3538   while (lp == NULL)
3539     {
3540       pid_t lwpid;
3541 
3542       /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3543 	 quirks:
3544 
3545 	 - If the thread group leader exits while other threads in the
3546 	   thread group still exist, waitpid(TGID, ...) hangs.  That
3547 	   waitpid won't return an exit status until the other threads
3548 	   in the group are reapped.
3549 
3550 	 - When a non-leader thread execs, that thread just vanishes
3551 	   without reporting an exit (so we'd hang if we waited for it
3552 	   explicitly in that case).  The exec event is reported to
3553 	   the TGID pid.  */
3554 
3555       errno = 0;
3556       lwpid = my_waitpid (-1, &status,  __WCLONE | WNOHANG);
3557       if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3558 	lwpid = my_waitpid (-1, &status, WNOHANG);
3559 
3560       if (debug_linux_nat)
3561 	fprintf_unfiltered (gdb_stdlog,
3562 			    "LNW: waitpid(-1, ...) returned %d, %s\n",
3563 			    lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3564 
3565       if (lwpid > 0)
3566 	{
3567 	  /* If this is true, then we paused LWPs momentarily, and may
3568 	     now have pending events to handle.  */
3569 	  int new_pending;
3570 
3571 	  if (debug_linux_nat)
3572 	    {
3573 	      fprintf_unfiltered (gdb_stdlog,
3574 				  "LLW: waitpid %ld received %s\n",
3575 				  (long) lwpid, status_to_str (status));
3576 	    }
3577 
3578 	  lp = linux_nat_filter_event (lwpid, status, &new_pending);
3579 
3580 	  /* STATUS is now no longer valid, use LP->STATUS instead.  */
3581 	  status = 0;
3582 
3583 	  if (lp && !ptid_match (lp->ptid, ptid))
3584 	    {
3585 	      gdb_assert (lp->resumed);
3586 
3587 	      if (debug_linux_nat)
3588 		fprintf (stderr,
3589 			 "LWP %ld got an event %06x, leaving pending.\n",
3590 			 ptid_get_lwp (lp->ptid), lp->status);
3591 
3592 	      if (WIFSTOPPED (lp->status))
3593 		{
3594 		  if (WSTOPSIG (lp->status) != SIGSTOP)
3595 		    {
3596 		      /* Cancel breakpoint hits.  The breakpoint may
3597 			 be removed before we fetch events from this
3598 			 process to report to the core.  It is best
3599 			 not to assume the moribund breakpoints
3600 			 heuristic always handles these cases --- it
3601 			 could be too many events go through to the
3602 			 core before this one is handled.  All-stop
3603 			 always cancels breakpoint hits in all
3604 			 threads.  */
3605 		      if (non_stop
3606 			  && linux_nat_lp_status_is_event (lp)
3607 			  && cancel_breakpoint (lp))
3608 			{
3609 			  /* Throw away the SIGTRAP.  */
3610 			  lp->status = 0;
3611 
3612 			  if (debug_linux_nat)
3613 			    fprintf (stderr,
3614 				     "LLW: LWP %ld hit a breakpoint while"
3615 				     " waiting for another process;"
3616 				     " cancelled it\n",
3617 				     ptid_get_lwp (lp->ptid));
3618 			}
3619 		      lp->stopped = 1;
3620 		    }
3621 		  else
3622 		    {
3623 		      lp->stopped = 1;
3624 		      lp->signalled = 0;
3625 		    }
3626 		}
3627 	      else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3628 		{
3629 		  if (debug_linux_nat)
3630 		    fprintf (stderr,
3631 			     "Process %ld exited while stopping LWPs\n",
3632 			     ptid_get_lwp (lp->ptid));
3633 
3634 		  /* This was the last lwp in the process.  Since
3635 		     events are serialized to GDB core, and we can't
3636 		     report this one right now, but GDB core and the
3637 		     other target layers will want to be notified
3638 		     about the exit code/signal, leave the status
3639 		     pending for the next time we're able to report
3640 		     it.  */
3641 
3642 		  /* Prevent trying to stop this thread again.  We'll
3643 		     never try to resume it because it has a pending
3644 		     status.  */
3645 		  lp->stopped = 1;
3646 
3647 		  /* Dead LWP's aren't expected to reported a pending
3648 		     sigstop.  */
3649 		  lp->signalled = 0;
3650 
3651 		  /* Store the pending event in the waitstatus as
3652 		     well, because W_EXITCODE(0,0) == 0.  */
3653 		  store_waitstatus (&lp->waitstatus, lp->status);
3654 		}
3655 
3656 	      /* Keep looking.  */
3657 	      lp = NULL;
3658 	    }
3659 
3660 	  if (new_pending)
3661 	    {
3662 	      /* Some LWP now has a pending event.  Go all the way
3663 		 back to check it.  */
3664 	      goto retry;
3665 	    }
3666 
3667 	  if (lp)
3668 	    {
3669 	      /* We got an event to report to the core.  */
3670 	      break;
3671 	    }
3672 
3673 	  /* Retry until nothing comes out of waitpid.  A single
3674 	     SIGCHLD can indicate more than one child stopped.  */
3675 	  continue;
3676 	}
3677 
3678       /* Check for zombie thread group leaders.  Those can't be reaped
3679 	 until all other threads in the thread group are.  */
3680       check_zombie_leaders ();
3681 
3682       /* If there are no resumed children left, bail.  We'd be stuck
3683 	 forever in the sigsuspend call below otherwise.  */
3684       if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3685 	{
3686 	  if (debug_linux_nat)
3687 	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3688 
3689 	  ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3690 
3691 	  if (!target_can_async_p ())
3692 	    clear_sigint_trap ();
3693 
3694 	  restore_child_signals_mask (&prev_mask);
3695 	  return minus_one_ptid;
3696 	}
3697 
3698       /* No interesting event to report to the core.  */
3699 
3700       if (target_options & TARGET_WNOHANG)
3701 	{
3702 	  if (debug_linux_nat)
3703 	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3704 
3705 	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
3706 	  restore_child_signals_mask (&prev_mask);
3707 	  return minus_one_ptid;
3708 	}
3709 
3710       /* We shouldn't end up here unless we want to try again.  */
3711       gdb_assert (lp == NULL);
3712 
3713       /* Block until we get an event reported with SIGCHLD.  */
3714       sigsuspend (&suspend_mask);
3715     }
3716 
3717   if (!target_can_async_p ())
3718     clear_sigint_trap ();
3719 
3720   gdb_assert (lp);
3721 
3722   status = lp->status;
3723   lp->status = 0;
3724 
3725   /* Don't report signals that GDB isn't interested in, such as
3726      signals that are neither printed nor stopped upon.  Stopping all
3727      threads can be a bit time-consuming so if we want decent
3728      performance with heavily multi-threaded programs, especially when
3729      they're using a high frequency timer, we'd better avoid it if we
3730      can.  */
3731 
3732   if (WIFSTOPPED (status))
3733     {
3734       enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3735 
3736       /* When using hardware single-step, we need to report every signal.
3737 	 Otherwise, signals in pass_mask may be short-circuited.  */
3738       if (!lp->step
3739 	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3740 	{
3741 	  /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3742 	     here?  It is not clear we should.  GDB may not expect
3743 	     other threads to run.  On the other hand, not resuming
3744 	     newly attached threads may cause an unwanted delay in
3745 	     getting them running.  */
3746 	  registers_changed ();
3747 	  if (linux_nat_prepare_to_resume != NULL)
3748 	    linux_nat_prepare_to_resume (lp);
3749 	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3750 				lp->step, signo);
3751 	  if (debug_linux_nat)
3752 	    fprintf_unfiltered (gdb_stdlog,
3753 				"LLW: %s %s, %s (preempt 'handle')\n",
3754 				lp->step ?
3755 				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
3756 				target_pid_to_str (lp->ptid),
3757 				(signo != GDB_SIGNAL_0
3758 				 ? strsignal (gdb_signal_to_host (signo))
3759 				 : "0"));
3760 	  lp->stopped = 0;
3761 	  goto retry;
3762 	}
3763 
3764       if (!non_stop)
3765 	{
3766 	  /* Only do the below in all-stop, as we currently use SIGINT
3767 	     to implement target_stop (see linux_nat_stop) in
3768 	     non-stop.  */
3769 	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3770 	    {
3771 	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3772 		 forwarded to the entire process group, that is, all LWPs
3773 		 will receive it - unless they're using CLONE_THREAD to
3774 		 share signals.  Since we only want to report it once, we
3775 		 mark it as ignored for all LWPs except this one.  */
3776 	      iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3777 					      set_ignore_sigint, NULL);
3778 	      lp->ignore_sigint = 0;
3779 	    }
3780 	  else
3781 	    maybe_clear_ignore_sigint (lp);
3782 	}
3783     }
3784 
3785   /* This LWP is stopped now.  */
3786   lp->stopped = 1;
3787 
3788   if (debug_linux_nat)
3789     fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3790 			status_to_str (status), target_pid_to_str (lp->ptid));
3791 
3792   if (!non_stop)
3793     {
3794       /* Now stop all other LWP's ...  */
3795       iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3796 
3797       /* ... and wait until all of them have reported back that
3798 	 they're no longer running.  */
3799       iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3800 
3801       /* If we're not waiting for a specific LWP, choose an event LWP
3802 	 from among those that have had events.  Giving equal priority
3803 	 to all LWPs that have had events helps prevent
3804 	 starvation.  */
3805       if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3806 	select_event_lwp (ptid, &lp, &status);
3807 
3808       /* Now that we've selected our final event LWP, cancel any
3809 	 breakpoints in other LWPs that have hit a GDB breakpoint.
3810 	 See the comment in cancel_breakpoints_callback to find out
3811 	 why.  */
3812       iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3813 
3814       /* We'll need this to determine whether to report a SIGSTOP as
3815 	 TARGET_WAITKIND_0.  Need to take a copy because
3816 	 resume_clear_callback clears it.  */
3817       last_resume_kind = lp->last_resume_kind;
3818 
3819       /* In all-stop, from the core's perspective, all LWPs are now
3820 	 stopped until a new resume action is sent over.  */
3821       iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3822     }
3823   else
3824     {
3825       /* See above.  */
3826       last_resume_kind = lp->last_resume_kind;
3827       resume_clear_callback (lp, NULL);
3828     }
3829 
3830   if (linux_nat_status_is_event (status))
3831     {
3832       if (debug_linux_nat)
3833 	fprintf_unfiltered (gdb_stdlog,
3834 			    "LLW: trap ptid is %s.\n",
3835 			    target_pid_to_str (lp->ptid));
3836     }
3837 
3838   if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3839     {
3840       *ourstatus = lp->waitstatus;
3841       lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3842     }
3843   else
3844     store_waitstatus (ourstatus, status);
3845 
3846   if (debug_linux_nat)
3847     fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3848 
3849   restore_child_signals_mask (&prev_mask);
3850 
3851   if (last_resume_kind == resume_stop
3852       && ourstatus->kind == TARGET_WAITKIND_STOPPED
3853       && WSTOPSIG (status) == SIGSTOP)
3854     {
3855       /* A thread that has been requested to stop by GDB with
3856 	 target_stop, and it stopped cleanly, so report as SIG0.  The
3857 	 use of SIGSTOP is an implementation detail.  */
3858       ourstatus->value.sig = GDB_SIGNAL_0;
3859     }
3860 
3861   if (ourstatus->kind == TARGET_WAITKIND_EXITED
3862       || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3863     lp->core = -1;
3864   else
3865     lp->core = linux_common_core_of_thread (lp->ptid);
3866 
3867   return lp->ptid;
3868 }
3869 
3870 /* Resume LWPs that are currently stopped without any pending status
3871    to report, but are resumed from the core's perspective.  */
3872 
static int
resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
{
  /* Iterator callback; DATA is the ptid the caller is about to wait
     on (see linux_nat_wait).  */
  ptid_t *wait_ptid_p = data;

  /* Only act on LWPs that are stopped at the ptrace level, that the
     core believes should be running, and that have nothing pending to
     report.  */
  if (lp->stopped
      && lp->resumed
      && lp->status == 0
      && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      CORE_ADDR pc = regcache_read_pc (regcache);

      gdb_assert (is_executing (lp->ptid));

      /* Don't bother if there's a breakpoint at PC that we'd hit
	 immediately, and we're not waiting for this LWP.  */
      if (!ptid_match (lp->ptid, *wait_ptid_p))
	{
	  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
	    return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
			    target_pid_to_str (lp->ptid),
			    paddress (gdbarch, pc),
			    lp->step);

      /* Discard cached register values before letting the LWP run
	 again.  */
      registers_changed ();
      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, GDB_SIGNAL_0);
      lp->stopped = 0;
      lp->stopped_by_watchpoint = 0;
    }

  /* Always return 0 so iterate_over_lwps visits every LWP.  */
  return 0;
}
3915 
/* Implementation of the "to_wait" target method.  Wraps
   linux_nat_wait_1 with the async-file bookkeeping needed when the
   target runs in async mode.  */

static ptid_t
linux_nat_wait (struct target_ops *ops,
		ptid_t ptid, struct target_waitstatus *ourstatus,
		int target_options)
{
  ptid_t event_ptid;

  if (debug_linux_nat)
    {
      char *options_string;

      options_string = target_options_to_string (target_options);
      fprintf_unfiltered (gdb_stdlog,
			  "linux_nat_wait: [%s], [%s]\n",
			  target_pid_to_str (ptid),
			  options_string);
      xfree (options_string);
    }

  /* Flush the async file first.  */
  if (target_can_async_p ())
    async_file_flush ();

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  LWPs get
     in this state if we find them stopping at a time we're not
     interested in reporting the event (target_wait on a
     specific_process, for example, see linux_nat_wait_1), and
     meanwhile the event became uninteresting.  Don't bother resuming
     LWPs we're not going to wait for if they'd stop immediately.  */
  if (non_stop)
    iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);

  event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);

  /* If we requested any event, and something came out, assume there
     may be more.  If we requested a specific lwp or process, also
     assume there may be more.  */
  if (target_can_async_p ()
      && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
	   && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
	  || !ptid_equal (ptid, minus_one_ptid)))
    async_file_mark ();

  /* Get ready for the next event.  */
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return event_ptid;
}
3966 
3967 static int
3968 kill_callback (struct lwp_info *lp, void *data)
3969 {
3970   /* PTRACE_KILL may resume the inferior.  Send SIGKILL first.  */
3971 
3972   errno = 0;
3973   kill (GET_LWP (lp->ptid), SIGKILL);
3974   if (debug_linux_nat)
3975     fprintf_unfiltered (gdb_stdlog,
3976 			"KC:  kill (SIGKILL) %s, 0, 0 (%s)\n",
3977 			target_pid_to_str (lp->ptid),
3978 			errno ? safe_strerror (errno) : "OK");
3979 
3980   /* Some kernels ignore even SIGKILL for processes under ptrace.  */
3981 
3982   errno = 0;
3983   ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3984   if (debug_linux_nat)
3985     fprintf_unfiltered (gdb_stdlog,
3986 			"KC:  PTRACE_KILL %s, 0, 0 (%s)\n",
3987 			target_pid_to_str (lp->ptid),
3988 			errno ? safe_strerror (errno) : "OK");
3989 
3990   return 0;
3991 }
3992 
/* Iterator callback: reap LWP LP (already sent SIGKILL by
   kill_callback) until waitpid reports it gone, re-killing it if the
   kernel lets it run again in the meantime.  */

static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
	{
	  pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
	  if (pid != (pid_t) -1)
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "KWC: wait %s received unknown.\n",
				    target_pid_to_str (lp->ptid));
	      /* The Linux kernel sometimes fails to kill a thread
		 completely after PTRACE_KILL; that goes from the stop
		 point in do_fork out to the one in
		 get_signal_to_deliver and waits again.  So kill it
		 again.  */
	      kill_callback (lp, NULL);
	    }
	}
      while (pid == GET_LWP (lp->ptid));

      gdb_assert (pid == -1 && errno == ECHILD);
    }

  /* Same reap-and-rekill loop, this time without __WCLONE.  */
  do
    {
      pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"KWC: wait %s received unk.\n",
				target_pid_to_str (lp->ptid));
	  /* See the call to kill_callback above.  */
	  kill_callback (lp, NULL);
	}
    }
  while (pid == GET_LWP (lp->ptid));

  /* The loop only exits once waitpid fails; at that point the LWP
     must be truly gone.  */
  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}
4047 
/* Implementation of the "to_kill" target method: kill the inferior
   and every LWP it has, plus any unfollowed fork child.  */

static void
linux_nat_kill (struct target_ops *ops)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
      wait (&status);

      /* Let the arch-specific native code know this process is
	 gone.  */
      linux_nat_forget_process (PIDGET (last.value.related_pid));
    }

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));

      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (ptid, stop_callback, NULL);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (ptid, stop_wait_callback, NULL);

      /* Kill all LWP's ...  */
      iterate_over_lwps (ptid, kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (ptid, kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
4094 
4095 static void
4096 linux_nat_mourn_inferior (struct target_ops *ops)
4097 {
4098   int pid = ptid_get_pid (inferior_ptid);
4099 
4100   purge_lwp_list (pid);
4101 
4102   if (! forks_exist_p ())
4103     /* Normal case, no other forks available.  */
4104     linux_ops->to_mourn_inferior (ops);
4105   else
4106     /* Multi-fork case.  The current inferior_ptid has exited, but
4107        there are other viable forks to debug.  Delete the exiting
4108        one and context-switch to the first available.  */
4109     linux_fork_mourn_inferior ();
4110 
4111   /* Let the arch-specific native code know this process is gone.  */
4112   linux_nat_forget_process (pid);
4113 }
4114 
4115 /* Convert a native/host siginfo object, into/from the siginfo in the
4116    layout of the inferiors' architecture.  */
4117 
4118 static void
4119 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
4120 {
4121   int done = 0;
4122 
4123   if (linux_nat_siginfo_fixup != NULL)
4124     done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4125 
4126   /* If there was no callback, or the callback didn't do anything,
4127      then just do a straight memcpy.  */
4128   if (!done)
4129     {
4130       if (direction == 1)
4131 	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4132       else
4133 	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4134     }
4135 }
4136 
/* Transfer the inferior's siginfo (TARGET_OBJECT_SIGNAL_INFO) via
   PTRACE_GETSIGINFO/PTRACE_SETSIGINFO.  Returns the number of bytes
   transferred, or -1 on error.  */

static LONGEST
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Address the current thread by its LWP id when it has one; fall
     back to the plain process id otherwise.  */
  pid = GET_LWP (inferior_ptid);
  if (pid == 0)
    pid = GET_PID (inferior_ptid);

  if (offset > sizeof (siginfo))
    return -1;

  /* ptrace reports failure through errno, so clear it first.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return -1;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp LEN so that OFFSET + LEN stays within the converted
     object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return -1;
    }

  return len;
}
4189 
4190 static LONGEST
4191 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4192 			const char *annex, gdb_byte *readbuf,
4193 			const gdb_byte *writebuf,
4194 			ULONGEST offset, LONGEST len)
4195 {
4196   struct cleanup *old_chain;
4197   LONGEST xfer;
4198 
4199   if (object == TARGET_OBJECT_SIGNAL_INFO)
4200     return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4201 			       offset, len);
4202 
4203   /* The target is connected but no live inferior is selected.  Pass
4204      this request down to a lower stratum (e.g., the executable
4205      file).  */
4206   if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4207     return 0;
4208 
4209   old_chain = save_inferior_ptid ();
4210 
4211   if (is_lwp (inferior_ptid))
4212     inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4213 
4214   xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4215 				     offset, len);
4216 
4217   do_cleanups (old_chain);
4218   return xfer;
4219 }
4220 
4221 static int
4222 linux_thread_alive (ptid_t ptid)
4223 {
4224   int err, tmp_errno;
4225 
4226   gdb_assert (is_lwp (ptid));
4227 
4228   /* Send signal 0 instead of anything ptrace, because ptracing a
4229      running thread errors out claiming that the thread doesn't
4230      exist.  */
4231   err = kill_lwp (GET_LWP (ptid), 0);
4232   tmp_errno = errno;
4233   if (debug_linux_nat)
4234     fprintf_unfiltered (gdb_stdlog,
4235 			"LLTA: KILL(SIG0) %s (%s)\n",
4236 			target_pid_to_str (ptid),
4237 			err ? safe_strerror (tmp_errno) : "OK");
4238 
4239   if (err != 0)
4240     return 0;
4241 
4242   return 1;
4243 }
4244 
/* Implementation of the "to_thread_alive" target method; simply
   defers to the ptid-only helper linux_thread_alive.  */

static int
linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  return linux_thread_alive (ptid);
}
4250 
4251 static char *
4252 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4253 {
4254   static char buf[64];
4255 
4256   if (is_lwp (ptid)
4257       && (GET_PID (ptid) != GET_LWP (ptid)
4258 	  || num_lwps (GET_PID (ptid)) > 1))
4259     {
4260       snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4261       return buf;
4262     }
4263 
4264   return normal_pid_to_str (ptid);
4265 }
4266 
/* Return the name the kernel records for thread THR in
   /proc/PID/task/LWP/comm, or NULL if it cannot be read or is empty.
   The result points to a static buffer overwritten by the next
   call.  */

static char *
linux_nat_thread_name (struct thread_info *thr)
{
  int pid = ptid_get_pid (thr->ptid);
  long lwp = ptid_get_lwp (thr->ptid);
#define FORMAT "/proc/%d/task/%ld/comm"
  /* +30 leaves ample room for the expansion of the two numeric
     conversions.  */
  char buf[sizeof (FORMAT) + 30];
  FILE *comm_file;
  char *result = NULL;

  snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
  comm_file = fopen (buf, "r");
  if (comm_file)
    {
      /* Not exported by the kernel, so we define it here.  */
#define COMM_LEN 16
      /* Static so RESULT stays valid after we return.  */
      static char line[COMM_LEN + 1];

      if (fgets (line, sizeof (line), comm_file))
	{
	  /* Strip a trailing newline, if any.  */
	  char *nl = strchr (line, '\n');

	  if (nl)
	    *nl = '\0';
	  /* Report an empty name as "no name" (NULL).  */
	  if (*line != '\0')
	    result = line;
	}

      fclose (comm_file);
    }

#undef COMM_LEN
#undef FORMAT

  return result;
}
4303 
4304 /* Accepts an integer PID; Returns a string representing a file that
4305    can be opened to get the symbols for the child process.  */
4306 
4307 static char *
4308 linux_child_pid_to_exec_file (int pid)
4309 {
4310   char *name1, *name2;
4311 
4312   name1 = xmalloc (MAXPATHLEN);
4313   name2 = xmalloc (MAXPATHLEN);
4314   make_cleanup (xfree, name1);
4315   make_cleanup (xfree, name2);
4316   memset (name2, 0, MAXPATHLEN);
4317 
4318   sprintf (name1, "/proc/%d/exe", pid);
4319   if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
4320     return name2;
4321   else
4322     return name1;
4323 }
4324 
4325 /* Records the thread's register state for the corefile note
4326    section.  */
4327 
static char *
linux_nat_collect_thread_registers (const struct regcache *regcache,
				    ptid_t ptid, bfd *obfd,
				    char *note_data, int *note_size,
				    enum gdb_signal stop_signal)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct regset *regset;
  int core_regset_p;
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;

  /* Prefer the gdbarch-provided core-section regset collectors; fall
     back to the traditional fill_gregset/fill_fpregset routines when
     the arch does not supply them.  */
  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);

  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
						     sizeof (gregs)))
	 != NULL && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
  else
    fill_gregset (regcache, &gregs, -1);

  /* General registers, the stop signal and the LWP id go into the
     prstatus note.  */
  note_data = (char *) elfcore_write_prstatus
			 (obfd, note_data, note_size, ptid_get_lwp (ptid),
			  gdb_signal_to_host (stop_signal), &gregs);

  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
						     sizeof (fpregs)))
	  != NULL && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
  else
    fill_fpregset (regcache, &fpregs, -1);

  /* Floating-point registers go into the prfpreg note.  */
  note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
					      &fpregs, sizeof (fpregs));

  return note_data;
}
4367 
4368 /* Fills the "to_make_corefile_note" target vector.  Builds the note
4369    section for a corefile, and returns it in a malloc buffer.  */
4370 
static char *
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
  /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
     converted to gdbarch_core_regset_sections, this function can go away.  */
  /* Delegate to the generic Linux note builder, supplying our
     per-thread register-collection callback.  */
  return linux_make_corefile_notes (target_gdbarch (), obfd, note_size,
				    linux_nat_collect_thread_registers);
}
4379 
4380 /* Implement the to_xfer_partial interface for memory reads using the /proc
4381    filesystem.  Because we can use a single read() call for /proc, this
4382    can be much more efficient than banging away at PTRACE_PEEKTEXT,
4383    but it doesn't support writes.  */
4384 
4385 static LONGEST
4386 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4387 			 const char *annex, gdb_byte *readbuf,
4388 			 const gdb_byte *writebuf,
4389 			 ULONGEST offset, LONGEST len)
4390 {
4391   LONGEST ret;
4392   int fd;
4393   char filename[64];
4394 
4395   if (object != TARGET_OBJECT_MEMORY || !readbuf)
4396     return 0;
4397 
4398   /* Don't bother for one word.  */
4399   if (len < 3 * sizeof (long))
4400     return 0;
4401 
4402   /* We could keep this file open and cache it - possibly one per
4403      thread.  That requires some juggling, but is even faster.  */
4404   sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4405   fd = open (filename, O_RDONLY | O_LARGEFILE);
4406   if (fd == -1)
4407     return 0;
4408 
4409   /* If pread64 is available, use it.  It's faster if the kernel
4410      supports it (only one syscall), and it's 64-bit safe even on
4411      32-bit platforms (for instance, SPARC debugging a SPARC64
4412      application).  */
4413 #ifdef HAVE_PREAD64
4414   if (pread64 (fd, readbuf, len, offset) != len)
4415 #else
4416   if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4417 #endif
4418     ret = 0;
4419   else
4420     ret = len;
4421 
4422   close (fd);
4423   return ret;
4424 }
4425 
4426 
4427 /* Enumerate spufs IDs for process PID.  */
static LONGEST
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  LONGEST pos = 0;		/* Running offset into the full ID list.  */
  LONGEST written = 0;		/* Bytes actually stored into BUF.  */
  char path[128];
  DIR *dir;
  struct dirent *entry;

  /* Scan the process's open file descriptors for ones that refer to
     spufs contexts; each such descriptor number is emitted as an
     ID.  */
  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip "." and ".." (and, as a side effect of atoi, fd 0,
	 which cannot be distinguished from a non-number).  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      /* Only directories living on a spufs mount qualify.  */
      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Store this ID as a 4-byte integer, but only the part of the
	 list that falls inside the caller's [OFFSET, OFFSET + LEN)
	 window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4476 
4477 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4478    object type, using the /proc file system.  */
static LONGEST
linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte *readbuf,
		     const gdb_byte *writebuf,
		     ULONGEST offset, LONGEST len)
{
  char buf[128];
  int fd = 0;
  int ret = -1;
  int pid = PIDGET (inferior_ptid);

  /* With no annex, the request is for the (read-only) list of SPU
     context IDs.  */
  if (!annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  /* Otherwise ANNEX names a path beneath the /proc/PID/fd
     directory.  */
  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  /* Position the descriptor at OFFSET; a failed seek means nothing
     can be transferred.  */
  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else if (readbuf)
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
4518 
4519 
4520 /* Parse LINE as a signal set and add its set bits to SIGS.  */
4521 
4522 static void
4523 add_line_to_sigset (const char *line, sigset_t *sigs)
4524 {
4525   int len = strlen (line) - 1;
4526   const char *p;
4527   int signum;
4528 
4529   if (line[len] != '\n')
4530     error (_("Could not parse signal set: %s"), line);
4531 
4532   p = line;
4533   signum = len * 4;
4534   while (len-- > 0)
4535     {
4536       int digit;
4537 
4538       if (*p >= '0' && *p <= '9')
4539 	digit = *p - '0';
4540       else if (*p >= 'a' && *p <= 'f')
4541 	digit = *p - 'a' + 10;
4542       else
4543 	error (_("Could not parse signal set: %s"), line);
4544 
4545       signum -= 4;
4546 
4547       if (digit & 1)
4548 	sigaddset (sigs, signum + 1);
4549       if (digit & 2)
4550 	sigaddset (sigs, signum + 2);
4551       if (digit & 4)
4552 	sigaddset (sigs, signum + 3);
4553       if (digit & 8)
4554 	sigaddset (sigs, signum + 4);
4555 
4556       p++;
4557     }
4558 }
4559 
/* Determine process PID's pending, blocked and ignored signal sets by
   parsing /proc/PID/status, storing them into *PENDING, *BLOCKED and
   *IGNORED respectively.  */
4562 
4563 void
4564 linux_proc_pending_signals (int pid, sigset_t *pending,
4565 			    sigset_t *blocked, sigset_t *ignored)
4566 {
4567   FILE *procfile;
4568   char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4569   struct cleanup *cleanup;
4570 
4571   sigemptyset (pending);
4572   sigemptyset (blocked);
4573   sigemptyset (ignored);
4574   sprintf (fname, "/proc/%d/status", pid);
4575   procfile = fopen (fname, "r");
4576   if (procfile == NULL)
4577     error (_("Could not open %s"), fname);
4578   cleanup = make_cleanup_fclose (procfile);
4579 
4580   while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4581     {
4582       /* Normal queued signals are on the SigPnd line in the status
4583 	 file.  However, 2.6 kernels also have a "shared" pending
4584 	 queue for delivering signals to a thread group, so check for
4585 	 a ShdPnd line also.
4586 
4587 	 Unfortunately some Red Hat kernels include the shared pending
4588 	 queue but not the ShdPnd status field.  */
4589 
4590       if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4591 	add_line_to_sigset (buffer + 8, pending);
4592       else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4593 	add_line_to_sigset (buffer + 8, pending);
4594       else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4595 	add_line_to_sigset (buffer + 8, blocked);
4596       else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4597 	add_line_to_sigset (buffer + 8, ignored);
4598     }
4599 
4600   do_cleanups (cleanup);
4601 }
4602 
/* Handle TARGET_OBJECT_OSDATA transfers by delegating to
   linux_common_xfer_osdata.  Only reads are meaningful; WRITEBUF and
   OPS are unused.  */

static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
		       const char *annex, gdb_byte *readbuf,
		       const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  gdb_assert (object == TARGET_OBJECT_OSDATA);

  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
4612 
/* Top-level to_xfer_partial method installed by
   linux_target_install_ops: route each object type to its dedicated
   handler before deferring to the inherited implementation.  */

static LONGEST
linux_xfer_partial (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  LONGEST xfer;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
			     offset, len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
                               offset, len);

  if (object == TARGET_OBJECT_SPU)
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
				offset, len);

  /* GDB calculates all the addresses in possibly larger width of the address.
     Address width needs to be masked before its final use - either by
     linux_proc_xfer_partial or inf_ptrace_xfer_partial.

     Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */

  if (object == TARGET_OBJECT_MEMORY)
    {
      int addr_bit = gdbarch_addr_bit (target_gdbarch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;
    }

  /* Try the fast /proc/PID/mem path first; if it transfers nothing,
     fall back to the method this target inherited (saved in
     super_xfer_partial by linux_target_install_ops).  */
  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
				  offset, len);
  if (xfer != 0)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
			     offset, len);
}
4654 
4655 static void
4656 cleanup_target_stop (void *arg)
4657 {
4658   ptid_t *ptid = (ptid_t *) arg;
4659 
4660   gdb_assert (arg != NULL);
4661 
4662   /* Unpause all */
4663   target_resume (*ptid, 0, GDB_SIGNAL_0);
4664 }
4665 
/* Implementation of to_static_tracepoint_markers_by_strid: collect
   from the in-process agent all static tracepoint markers, or only
   the ones whose string id matches STRID when it is non-NULL.  */

static VEC(static_tracepoint_marker_p) *
linux_child_static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  struct cleanup *old_chain;
  int pid = ptid_get_pid (inferior_ptid);
  VEC(static_tracepoint_marker_p) *markers = NULL;
  struct static_tracepoint_marker *marker = NULL;
  char *p = s;
  ptid_t ptid = ptid_build (pid, 0, 0);

  /* Pause all */
  target_stop (ptid);

  /* Ask the agent for the first batch of marker definitions
     ("qTfSTM").  */
  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
  s[sizeof ("qTfSTM")] = 0;

  agent_run_command (pid, s, strlen (s) + 1);

  /* Make sure MARKER is released and the process is resumed on every
     exit path, including errors thrown below.  */
  old_chain = make_cleanup (free_current_marker, &marker);
  make_cleanup (cleanup_target_stop, &ptid);

  /* A reply starting with 'm' carries marker definitions; anything
     else ends the enumeration.  */
  while (*p++ == 'm')
    {
      if (marker == NULL)
	marker = XCNEW (struct static_tracepoint_marker);

      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, marker);

	  if (strid == NULL || strcmp (strid, marker->str_id) == 0)
	    {
	      VEC_safe_push (static_tracepoint_marker_p,
			     markers, marker);
	      /* Ownership moved into MARKERS; force a fresh
		 allocation next iteration.  */
	      marker = NULL;
	    }
	  else
	    {
	      /* Not a match; reuse the allocation for the next
		 definition.  */
	      release_static_tracepoint_marker (marker);
	      memset (marker, 0, sizeof (*marker));
	    }
	}
      while (*p++ == ',');	/* comma-separated list */

      /* Fetch the next batch ("qTsSTM").  */
      memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
      s[sizeof ("qTsSTM")] = 0;
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  do_cleanups (old_chain);

  return markers;
}
4721 
4722 /* Create a prototype generic GNU/Linux target.  The client can override
4723    it with local methods.  */
4724 
/* Install the GNU/Linux-specific methods into target vector T,
   overriding whatever the inherited (ptrace-level) target provided.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  /* Fork/vfork/exec/syscall catchpoint support.  */
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Save the inherited method first: linux_xfer_partial falls back to
     it when the Linux-specific transfer declines the request.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;

  t->to_static_tracepoint_markers_by_strid
    = linux_child_static_tracepoint_markers_by_strid;
}
4747 
/* Build the generic GNU/Linux target vector: start from the plain
   ptrace target and layer the Linux-specific methods on top.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *ops = inf_ptrace_target ();

  linux_target_install_ops (ops);
  return ops;
}
4758 
4759 struct target_ops *
4760 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4761 {
4762   struct target_ops *t;
4763 
4764   t = inf_ptrace_trad_target (register_u_offset);
4765   linux_target_install_ops (t);
4766 
4767   return t;
4768 }
4769 
4770 /* target_is_async_p implementation.  */
4771 
static int
linux_nat_is_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  /* Nonzero iff "set target-async on" is in effect.  */
  return target_async_permitted;
}
4780 
4781 /* target_can_async_p implementation.  */
4782 
static int
linux_nat_can_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  /* Same condition as linux_nat_is_async_p: gated on the user's
     "set target-async" setting.  */
  return target_async_permitted;
}
4791 
/* target_supports_non_stop implementation: the native GNU/Linux
   target always supports non-stop mode.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
4797 
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  Read by linux_nat_supports_multi_process
   below.  */

int linux_multi_process = 1;
4802 
/* target_supports_multi_process implementation: defers to the
   linux_multi_process knob above.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
4808 
/* target_supports_disable_randomization implementation: disabling
   address-space randomization requires personality(2) support, which
   is detected at configure time (HAVE_PERSONALITY).  */

static int
linux_nat_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4818 
/* Nonzero while GDB (not the inferior) owns the terminal in async
   mode; used to keep the terminal_inferior/terminal_ours pair
   idempotent.  */
static int async_terminal_is_ours = 1;
4820 
4821 /* target_terminal_inferior implementation.  */
4822 
4823 static void
4824 linux_nat_terminal_inferior (void)
4825 {
4826   if (!target_is_async_p ())
4827     {
4828       /* Async mode is disabled.  */
4829       terminal_inferior ();
4830       return;
4831     }
4832 
4833   terminal_inferior ();
4834 
4835   /* Calls to target_terminal_*() are meant to be idempotent.  */
4836   if (!async_terminal_is_ours)
4837     return;
4838 
4839   delete_file_handler (input_fd);
4840   async_terminal_is_ours = 0;
4841   set_sigint_trap ();
4842 }
4843 
4844 /* target_terminal_ours implementation.  */
4845 
4846 static void
4847 linux_nat_terminal_ours (void)
4848 {
4849   if (!target_is_async_p ())
4850     {
4851       /* Async mode is disabled.  */
4852       terminal_ours ();
4853       return;
4854     }
4855 
4856   /* GDB should never give the terminal to the inferior if the
4857      inferior is running in the background (run&, continue&, etc.),
4858      but claiming it sure should.  */
4859   terminal_ours ();
4860 
4861   if (async_terminal_is_ours)
4862     return;
4863 
4864   clear_sigint_trap ();
4865   add_file_handler (input_fd, stdin_event_handler, 0);
4866   async_terminal_is_ours = 1;
4867 }
4868 
/* Callback (and its opaque context argument) registered through
   linux_nat_async; invoked by handle_target_event when the event
   pipe becomes readable.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;
4872 
4873 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4874    so we notice when any child changes state, and notify the
4875    event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4876    above to wait for the arrival of a SIGCHLD.  */
4877 
4878 static void
4879 sigchld_handler (int signo)
4880 {
4881   int old_errno = errno;
4882 
4883   if (debug_linux_nat)
4884     ui_file_write_async_safe (gdb_stdlog,
4885 			      "sigchld\n", sizeof ("sigchld\n") - 1);
4886 
4887   if (signo == SIGCHLD
4888       && linux_nat_event_pipe[0] != -1)
4889     async_file_mark (); /* Let the event loop know that there are
4890 			   events to handle.  */
4891 
4892   errno = old_errno;
4893 }
4894 
4895 /* Callback registered with the target events file descriptor.  */
4896 
4897 static void
4898 handle_target_event (int error, gdb_client_data client_data)
4899 {
4900   (*async_client_callback) (INF_REG_EVENT, async_client_context);
4901 }
4902 
4903 /* Create/destroy the target events pipe.  Returns previous state.  */
4904 
4905 static int
4906 linux_async_pipe (int enable)
4907 {
4908   int previous = (linux_nat_event_pipe[0] != -1);
4909 
4910   if (previous != enable)
4911     {
4912       sigset_t prev_mask;
4913 
4914       block_child_signals (&prev_mask);
4915 
4916       if (enable)
4917 	{
4918 	  if (pipe (linux_nat_event_pipe) == -1)
4919 	    internal_error (__FILE__, __LINE__,
4920 			    "creating event pipe failed.");
4921 
4922 	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4923 	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4924 	}
4925       else
4926 	{
4927 	  close (linux_nat_event_pipe[0]);
4928 	  close (linux_nat_event_pipe[1]);
4929 	  linux_nat_event_pipe[0] = -1;
4930 	  linux_nat_event_pipe[1] = -1;
4931 	}
4932 
4933       restore_child_signals_mask (&prev_mask);
4934     }
4935 
4936   return previous;
4937 }
4938 
4939 /* target_async implementation.  */
4940 
4941 static void
4942 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4943 				   void *context), void *context)
4944 {
4945   if (callback != NULL)
4946     {
4947       async_client_callback = callback;
4948       async_client_context = context;
4949       if (!linux_async_pipe (1))
4950 	{
4951 	  add_file_handler (linux_nat_event_pipe[0],
4952 			    handle_target_event, NULL);
4953 	  /* There may be pending events to handle.  Tell the event loop
4954 	     to poll them.  */
4955 	  async_file_mark ();
4956 	}
4957     }
4958   else
4959     {
4960       async_client_callback = callback;
4961       async_client_context = context;
4962       delete_file_handler (linux_nat_event_pipe[0]);
4963       linux_async_pipe (0);
4964     }
4965   return;
4966 }
4967 
4968 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4969    event came out.  */
4970 
/* iterate_over_lwps callback used by linux_nat_stop: request a stop
   of LWP unless one is already pending.  DATA is unused.  Always
   returns 0 so that iteration continues over all matching LWPs.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));


      /* A stop was already requested on GDB's behalf; don't send
	 another SIGSTOP.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}
5014 
5015 static void
5016 linux_nat_stop (ptid_t ptid)
5017 {
5018   if (non_stop)
5019     iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5020   else
5021     linux_ops->to_stop (ptid);
5022 }
5023 
5024 static void
5025 linux_nat_close (int quitting)
5026 {
5027   /* Unregister from the event loop.  */
5028   if (linux_nat_is_async_p ())
5029     linux_nat_async (NULL, 0);
5030 
5031   if (linux_ops->to_close)
5032     linux_ops->to_close (quitting);
5033 }
5034 
5035 /* When requests are passed down from the linux-nat layer to the
5036    single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5037    used.  The address space pointer is stored in the inferior object,
5038    but the common code that is passed such ptid can't tell whether
5039    lwpid is a "main" process id or not (it assumes so).  We reverse
5040    look up the "main" process id from the lwp here.  */
5041 
5042 static struct address_space *
5043 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5044 {
5045   struct lwp_info *lwp;
5046   struct inferior *inf;
5047   int pid;
5048 
5049   pid = GET_LWP (ptid);
5050   if (GET_LWP (ptid) == 0)
5051     {
5052       /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
5053 	 tgid.  */
5054       lwp = find_lwp_pid (ptid);
5055       pid = GET_PID (lwp->ptid);
5056     }
5057   else
5058     {
5059       /* A (pid,lwpid,0) ptid.  */
5060       pid = GET_PID (ptid);
5061     }
5062 
5063   inf = find_inferior_pid (pid);
5064   gdb_assert (inf != NULL);
5065   return inf->aspace;
5066 }
5067 
5068 /* Return the cached value of the processor core for thread PTID.  */
5069 
5070 static int
5071 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5072 {
5073   struct lwp_info *info = find_lwp_pid (ptid);
5074 
5075   if (info)
5076     return info->core;
5077   return -1;
5078 }
5079 
/* Take T (a vector typically produced by linux_target above), install
   the multi-threaded GNU/Linux methods on top of it, and register the
   result with GDB.  A copy of the incoming single-threaded vector is
   kept in linux_ops so the methods installed here can delegate to
   it.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Methods for async support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
5134 
5135 /* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t,
			  void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  /* T is intentionally unused for the same reason.  */
  linux_nat_new_thread = new_thread;
}
5145 
5146 /* See declaration in linux-nat.h.  */
5147 
/* Register NEW_FORK, a method to call when a fork child is attached.
   T is unused; only a single GNU/Linux native target instance is
   supported.  */

void
linux_nat_set_new_fork (struct target_ops *t,
			linux_nat_new_fork_ftype *new_fork)
{
  /* Save the pointer.  */
  linux_nat_new_fork = new_fork;
}
5155 
5156 /* See declaration in linux-nat.h.  */
5157 
/* Register FN, called by linux_nat_forget_process below when a
   process is forgotten.  T is unused; only a single GNU/Linux native
   target instance is supported.  */

void
linux_nat_set_forget_process (struct target_ops *t,
			      linux_nat_forget_process_ftype *fn)
{
  /* Save the pointer.  */
  linux_nat_forget_process_hook = fn;
}
5165 
5166 /* See declaration in linux-nat.h.  */
5167 
5168 void
5169 linux_nat_forget_process (pid_t pid)
5170 {
5171   if (linux_nat_forget_process_hook != NULL)
5172     linux_nat_forget_process_hook (pid);
5173 }
5174 
5175 /* Register a method that converts a siginfo object between the layout
5176    that ptrace returns, and the layout in the architecture of the
5177    inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (siginfo_t *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  T is unused; only a single GNU/Linux native
     target instance is supported.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
5187 
5188 /* Register a method to call prior to resuming a thread.  */
5189 
void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  T is unused; only a single GNU/Linux native
     target instance is supported.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}
5197 
5198 /* See linux-nat.h.  */
5199 
5200 int
5201 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
5202 {
5203   int pid;
5204 
5205   pid = GET_LWP (ptid);
5206   if (pid == 0)
5207     pid = GET_PID (ptid);
5208 
5209   errno = 0;
5210   ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
5211   if (errno != 0)
5212     {
5213       memset (siginfo, 0, sizeof (*siginfo));
5214       return 0;
5215     }
5216   return 1;
5217 }
5218 
5219 /* Provide a prototype to silence -Wmissing-prototypes.  */
5220 extern initialize_file_ftype _initialize_linux_nat;
5221 
void
_initialize_linux_nat (void)
{
  /* "set debug lin-lwp N" toggles the debug_linux_nat tracing used
     throughout this file.  */
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  /* SA_RESTART so interrupted syscalls are resumed transparently.  */
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
5251 
5252 
5253 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5254    the GNU/Linux Threads library and therefore doesn't really belong
5255    here.  */
5256 
5257 /* Read variable NAME in the target and return its value if found.
5258    Otherwise return zero.  It is assumed that the type of the variable
5259    is `int'.  */
5260 
5261 static int
5262 get_signo (const char *name)
5263 {
5264   struct minimal_symbol *ms;
5265   int signo;
5266 
5267   ms = lookup_minimal_symbol (name, NULL, NULL);
5268   if (ms == NULL)
5269     return 0;
5270 
5271   if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5272 			  sizeof (signo)) != 0)
5273     return 0;
5274 
5275   return signo;
5276 }
5277 
5278 /* Return the set of signals used by the threads library in *SET.  */
5279 
void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Query the inferior's LinuxThreads variables; each returns 0 when
     the symbol is absent (e.g. under NPTL).  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
5324