xref: /netbsd-src/external/gpl3/gdb/dist/gdb/linux-nat.c (revision b757af438b42b93f8c6571f026d8b8ef3eaf5fc9)
1 /* GNU/Linux native-dependent code common to multiple platforms.
2 
3    Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4    2011 Free Software Foundation, Inc.
5 
6    This file is part of GDB.
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20 
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h>		/* for MAXPATHLEN */
41 #include <sys/procfs.h>		/* for elf_gregset etc.  */
42 #include "elf-bfd.h"		/* for elfcore_write_* */
43 #include "gregset.h"		/* for gregset */
44 #include "gdbcore.h"		/* for get_exec_file */
45 #include <ctype.h>		/* for isdigit */
46 #include "gdbthread.h"		/* for struct thread_info etc.  */
47 #include "gdb_stat.h"		/* for struct stat */
48 #include <fcntl.h>		/* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include "gdb_dirent.h"
55 #include "xml-support.h"
56 #include "terminal.h"
57 #include <sys/vfs.h>
58 #include "solib.h"
59 
60 #ifndef SPUFS_MAGIC
61 #define SPUFS_MAGIC 0x23c9b64e
62 #endif
63 
64 #ifdef HAVE_PERSONALITY
65 # include <sys/personality.h>
66 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
67 #  define ADDR_NO_RANDOMIZE 0x0040000
68 # endif
69 #endif /* HAVE_PERSONALITY */
70 
71 /* This comment documents high-level logic of this file.
72 
73 Waiting for events in sync mode
74 ===============================
75 
76 When waiting for an event in a specific thread, we just use waitpid, passing
77 the specific pid, and not passing WNOHANG.
78 
79 When waiting for an event in all threads, waitpid is not quite good.  Prior to
80 version 2.4, Linux can either wait for event in main thread, or in secondary
81 threads.  (2.4 has the __WALL flag).  So, if we use blocking waitpid, we might
82 miss an event.  The solution is to use non-blocking waitpid, together with
83 sigsuspend.  First, we use non-blocking waitpid to get an event in the main
84 process, if any.  Second, we use non-blocking waitpid with the __WCLONED
85 flag to check for events in cloned processes.  If nothing is found, we use
86 sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means something
87 happened to a child process -- and SIGCHLD will be delivered both for events
88 in main debugged process and in cloned processes.  As soon as we know there's
89 an event, we get back to calling nonblocking waitpid with and without
90 __WCLONED.
91 
92 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
93 so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
94 blocked, the signal becomes pending and sigsuspend immediately
95 notices it and returns.
96 
97 Waiting for events in async mode
98 ================================
99 
100 In async mode, GDB should always be ready to handle both user input
101 and target events, so neither blocking waitpid nor sigsuspend are
102 viable options.  Instead, we should asynchronously notify the GDB main
103 event loop whenever there's an unprocessed event from the target.  We
104 detect asynchronous target events by handling SIGCHLD signals.  To
105 notify the event loop about target events, the self-pipe trick is used
106 --- a pipe is registered as waitable event source in the event loop,
107 the event loop select/poll's on the read end of this pipe (as well on
108 other event sources, e.g., stdin), and the SIGCHLD handler writes a
109 byte to this pipe.  This is more portable than relying on
110 pselect/ppoll, since on kernels that lack those syscalls, libc
111 emulates them with select/poll+sigprocmask, and that is racy
112 (a.k.a. plain broken).
113 
114 Obviously, if we fail to notify the event loop if there's a target
115 event, it's bad.  OTOH, if we notify the event loop when there's no
116 event from the target, linux_nat_wait will detect that there's no real
117 event to report, and return event of type TARGET_WAITKIND_IGNORE.
118 This is mostly harmless, but it will waste time and is better avoided.
119 
120 The main design point is that every time GDB is outside linux-nat.c,
121 we have a SIGCHLD handler installed that is called when something
122 happens to the target and notifies the GDB event loop.  Whenever GDB
123 core decides to handle the event, and calls into linux-nat.c, we
124 process things as in sync mode, except that we never block in
125 sigsuspend.
126 
127 While processing an event, we may end up momentarily blocked in
128 waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
129 return quickly.  E.g., in all-stop mode, before reporting to the core
130 that an LWP hit a breakpoint, all LWPs are stopped by sending them
131 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
132 Note that this is different from blocking indefinitely waiting for the
133 next event --- here, we're already handling an event.
134 
135 Use of signals
136 ==============
137 
138 We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
139 signal is not entirely significant; we just need for a signal to be delivered,
140 so that we can intercept it.  SIGSTOP's advantage is that it can not be
141 blocked.  A disadvantage is that it is not a real-time signal, so it can only
142 be queued once; we do not keep track of other sources of SIGSTOP.
143 
144 Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
145 use them, because they have special behavior when the signal is generated -
146 not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
147 kills the entire thread group.
148 
149 A delivered SIGSTOP would stop the entire thread group, not just the thread we
150 tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
151 cancel it (by PTRACE_CONT without passing SIGSTOP).
152 
153 We could use a real-time signal instead.  This would solve those problems; we
154 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
155 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
156 generates it, and there are races with trying to find a signal that is not
157 blocked.  */
158 
/* Some hosts lack O_LARGEFILE; make it a no-op there.  */
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  PTRACE_EVENT_FORK doubles as the "are the extended ptrace
   event definitions present?" probe.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  These show
   up in the high 16 bits of the waitpid status.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

/* Fallbacks for hosts whose headers predate these ptrace requests.  */
#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO    0x4202
# define PTRACE_SETSIGINFO    0x4203
#endif
205 
/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

/* Nonzero enables debug output for the lwp module ("set debug lin-lwp").  */
static int debug_linux_nat;
/* "show debug lin-lwp" callback: report the current debug setting.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  const char *fmt = _("Debugging of GNU/Linux lwp module is %s.\n");

  fprintf_filtered (file, fmt, value);
}
237 
/* Nonzero enables debug output for the async lwp module.  */
static int debug_linux_nat_async = 0;

/* "show debug lin-lwp-async" callback: report the current setting.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  const char *fmt = _("Debugging of GNU/Linux async lwp module is %s.\n");

  fprintf_filtered (file, fmt, value);
}
247 
/* User setting: disable address space randomization when starting the
   inferior (defaults to on).  */
static int disable_randomization = 1;

/* "show disable-randomization" callback.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  const char *fmt = _("Disabling randomization of debuggee's "
		      "virtual address space is %s.\n");

  fprintf_filtered (file, fmt, value);
#else /* !HAVE_PERSONALITY */
  /* Without personality(2) support we cannot honor the setting.  */
  fputs_filtered (_("Disabling randomization of debuggee's "
		    "virtual address space is unsupported on\n"
		    "this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
265 
/* "set disable-randomization" callback.  On hosts with personality(2)
   there is nothing to do here; elsewhere, reject the setting.  */
static void
set_disable_randomization (char *args, int from_tty,
			   struct cmd_list_element *c)
{
#ifdef HAVE_PERSONALITY
  /* The value is consulted when the inferior is started.  */
#else /* !HAVE_PERSONALITY */
  error (_("Disabling randomization of debuggee's "
	   "virtual address space is unsupported on\n"
	   "this platform."));
#endif /* !HAVE_PERSONALITY */
}
276 
/* A singly-linked list of (pid, wait-status) pairs, remembering children
   that reported a stop before we were ready to process the event.  */
struct simple_pid_list
{
  int pid;			/* Process or LWP id.  */
  int status;			/* Wait status, as returned by waitpid.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;
284 
/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  Probed lazily by
   linux_test_for_tracefork.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  Set as a side effect of the tracefork
   probe.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.  */

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* Stores the current used ptrace() options.  */
static int current_ptrace_options = 0;

/* The read/write ends of the pipe registered as waitable file in the
   event loop (the self-pipe trick described at the top of the file).
   -1 means not yet created.  */
static int linux_nat_event_pipe[2] = { -1, -1 };
312 
313 /* Flush the event pipe.  */
314 
315 static void
316 async_file_flush (void)
317 {
318   int ret;
319   char buf;
320 
321   do
322     {
323       ret = read (linux_nat_event_pipe[0], &buf, 1);
324     }
325   while (ret >= 0 || (ret == -1 && errno == EINTR));
326 }
327 
328 /* Put something (anything, doesn't matter what, or how much) in event
329    pipe, so that the select/poll in the event-loop realizes we have
330    something to process.  */
331 
332 static void
333 async_file_mark (void)
334 {
335   int ret;
336 
337   /* It doesn't really matter what the pipe contains, as long we end
338      up with something in it.  Might as well flush the previous
339      left-overs.  */
340   async_file_flush ();
341 
342   do
343     {
344       ret = write (linux_nat_event_pipe[1], "+", 1);
345     }
346   while (ret == -1 && errno == EINTR);
347 
348   /* Ignore EAGAIN.  If the pipe is full, the event loop will already
349      be awakened anyway.  */
350 }
351 
/* Forward declarations of async-mode and LWP helpers defined later in
   this file.  */

static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type,
			      void *context),
			     void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

/* Block/unblock SIGCHLD around regions that must not be interrupted.  */
static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);
368 
369 
370 /* Trivial list manipulation functions to keep track of a list of
371    new stopped processes.  */
372 static void
373 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
374 {
375   struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
376 
377   new_pid->pid = pid;
378   new_pid->status = status;
379   new_pid->next = *listp;
380   *listp = new_pid;
381 }
382 
383 static int
384 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
385 {
386   struct simple_pid_list **p;
387 
388   for (p = listp; *p != NULL; p = &(*p)->next)
389     if ((*p)->pid == pid)
390       {
391 	struct simple_pid_list *next = (*p)->next;
392 
393 	*statusp = (*p)->status;
394 	xfree (*p);
395 	*p = next;
396 	return 1;
397       }
398   return 0;
399 }
400 
/* Record that child PID stopped with wait status STATUS before we were
   ready for the event, so a later wait can pick it back up from the
   stopped_pids list.  */
static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}
406 
407 
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  /* Runs in the probe child: request tracing, stop so the parent can
     set ptrace options, fork once more to (maybe) generate a
     PTRACE_EVENT_FORK, then exit.  The order here is load-bearing.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
418 
/* Wrapper around waitpid (PID, STATUSP, FLAGS) that retries the call
   whenever it is interrupted by a signal (EINTR).  Returns whatever
   waitpid finally returned.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int result;

  while ((result = waitpid (pid, statusp, flags)) == -1 && errno == EINTR)
    ;

  return result;
}
434 
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.

   Sets linux_supports_tracefork_flag and (as a side effect)
   linux_supports_tracevforkdone_flag.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  /* Assume unsupported until proven otherwise.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* Quick rejection: if the kernel refuses the option outright, the
     feature is definitely absent.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Spawn a throwaway child to probe the option for real.  */
  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* Wait for the child's self-inflicted SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* The option was rejected on the child: clean the child up and
	 leave the flag at 0.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run on to its fork () call.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* The extended event code is reported in the high 16 bits of the
     wait status.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* We both got the event and could extract the grandchild's
	     pid: TRACEFORK is usable.  Reap the grandchild too.  */
	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)"), ret, status);

  /* Dispose of the probe child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}
547 
548 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
549 
550    We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
551    we know that the feature is not available.  This may change the tracing
552    options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */
553 
554 static void
555 linux_test_for_tracesysgood (int original_pid)
556 {
557   int ret;
558   sigset_t prev_mask;
559 
560   /* We don't want those ptrace calls to be interrupted.  */
561   block_child_signals (&prev_mask);
562 
563   linux_supports_tracesysgood_flag = 0;
564 
565   ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
566   if (ret != 0)
567     goto out;
568 
569   linux_supports_tracesysgood_flag = 1;
570 out:
571   restore_child_signals_mask (&prev_mask);
572 }
573 
574 /* Determine wether we support PTRACE_O_TRACESYSGOOD option available.
575    This function also sets linux_supports_tracesysgood_flag.  */
576 
577 static int
578 linux_supports_tracesysgood (int pid)
579 {
580   if (linux_supports_tracesysgood_flag == -1)
581     linux_test_for_tracesysgood (pid);
582   return linux_supports_tracesysgood_flag;
583 }
584 
585 /* Return non-zero iff we have tracefork functionality available.
586    This function also sets linux_supports_tracefork_flag.  */
587 
588 static int
589 linux_supports_tracefork (int pid)
590 {
591   if (linux_supports_tracefork_flag == -1)
592     linux_test_for_tracefork (pid);
593   return linux_supports_tracefork_flag;
594 }
595 
596 static int
597 linux_supports_tracevforkdone (int pid)
598 {
599   if (linux_supports_tracefork_flag == -1)
600     linux_test_for_tracefork (pid);
601   return linux_supports_tracevforkdone_flag;
602 }
603 
604 static void
605 linux_enable_tracesysgood (ptid_t ptid)
606 {
607   int pid = ptid_get_lwp (ptid);
608 
609   if (pid == 0)
610     pid = ptid_get_pid (ptid);
611 
612   if (linux_supports_tracesysgood (pid) == 0)
613     return;
614 
615   current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
616 
617   ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
618 }
619 
620 
621 void
622 linux_enable_event_reporting (ptid_t ptid)
623 {
624   int pid = ptid_get_lwp (ptid);
625 
626   if (pid == 0)
627     pid = ptid_get_pid (ptid);
628 
629   if (! linux_supports_tracefork (pid))
630     return;
631 
632   current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
633     | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
634 
635   if (linux_supports_tracevforkdone (pid))
636     current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
637 
638   /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
639      read-only process state.  */
640 
641   ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
642 }
643 
/* target_ops post-attach hook: after attaching to PID, enable extended
   ptrace event reporting, let the thread_db layer discover threads, and
   enable syscall tracing.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}
651 
/* target_ops post-startup hook: same setup as after attach, but for an
   inferior we just started ourselves (identified by PTID).  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}
659 
660 static int
661 linux_child_follow_fork (struct target_ops *ops, int follow_child)
662 {
663   sigset_t prev_mask;
664   int has_vforked;
665   int parent_pid, child_pid;
666 
667   block_child_signals (&prev_mask);
668 
669   has_vforked = (inferior_thread ()->pending_follow.kind
670 		 == TARGET_WAITKIND_VFORKED);
671   parent_pid = ptid_get_lwp (inferior_ptid);
672   if (parent_pid == 0)
673     parent_pid = ptid_get_pid (inferior_ptid);
674   child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
675 
676   if (!detach_fork)
677     linux_enable_event_reporting (pid_to_ptid (child_pid));
678 
679   if (has_vforked
680       && !non_stop /* Non-stop always resumes both branches.  */
681       && (!target_is_async_p () || sync_execution)
682       && !(follow_child || detach_fork || sched_multi))
683     {
684       /* The parent stays blocked inside the vfork syscall until the
685 	 child execs or exits.  If we don't let the child run, then
686 	 the parent stays blocked.  If we're telling the parent to run
687 	 in the foreground, the user will not be able to ctrl-c to get
688 	 back the terminal, effectively hanging the debug session.  */
689       fprintf_filtered (gdb_stderr, _("\
690 Can not resume the parent process over vfork in the foreground while\n\
691 holding the child stopped.  Try \"set detach-on-fork\" or \
692 \"set schedule-multiple\".\n"));
693       /* FIXME output string > 80 columns.  */
694       return 1;
695     }
696 
697   if (! follow_child)
698     {
699       struct lwp_info *child_lp = NULL;
700 
701       /* We're already attached to the parent, by default.  */
702 
703       /* Detach new forked process?  */
704       if (detach_fork)
705 	{
706 	  /* Before detaching from the child, remove all breakpoints
707 	     from it.  If we forked, then this has already been taken
708 	     care of by infrun.c.  If we vforked however, any
709 	     breakpoint inserted in the parent is visible in the
710 	     child, even those added while stopped in a vfork
711 	     catchpoint.  This will remove the breakpoints from the
712 	     parent also, but they'll be reinserted below.  */
713 	  if (has_vforked)
714 	    {
715 	      /* keep breakpoints list in sync.  */
716 	      remove_breakpoints_pid (GET_PID (inferior_ptid));
717 	    }
718 
719 	  if (info_verbose || debug_linux_nat)
720 	    {
721 	      target_terminal_ours ();
722 	      fprintf_filtered (gdb_stdlog,
723 				"Detaching after fork from "
724 				"child process %d.\n",
725 				child_pid);
726 	    }
727 
728 	  ptrace (PTRACE_DETACH, child_pid, 0, 0);
729 	}
730       else
731 	{
732 	  struct inferior *parent_inf, *child_inf;
733 	  struct cleanup *old_chain;
734 
735 	  /* Add process to GDB's tables.  */
736 	  child_inf = add_inferior (child_pid);
737 
738 	  parent_inf = current_inferior ();
739 	  child_inf->attach_flag = parent_inf->attach_flag;
740 	  copy_terminal_info (child_inf, parent_inf);
741 
742 	  old_chain = save_inferior_ptid ();
743 	  save_current_program_space ();
744 
745 	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
746 	  add_thread (inferior_ptid);
747 	  child_lp = add_lwp (inferior_ptid);
748 	  child_lp->stopped = 1;
749 	  child_lp->resumed = 1;
750 
751 	  /* If this is a vfork child, then the address-space is
752 	     shared with the parent.  */
753 	  if (has_vforked)
754 	    {
755 	      child_inf->pspace = parent_inf->pspace;
756 	      child_inf->aspace = parent_inf->aspace;
757 
758 	      /* The parent will be frozen until the child is done
759 		 with the shared region.  Keep track of the
760 		 parent.  */
761 	      child_inf->vfork_parent = parent_inf;
762 	      child_inf->pending_detach = 0;
763 	      parent_inf->vfork_child = child_inf;
764 	      parent_inf->pending_detach = 0;
765 	    }
766 	  else
767 	    {
768 	      child_inf->aspace = new_address_space ();
769 	      child_inf->pspace = add_program_space (child_inf->aspace);
770 	      child_inf->removable = 1;
771 	      set_current_program_space (child_inf->pspace);
772 	      clone_program_space (child_inf->pspace, parent_inf->pspace);
773 
774 	      /* Let the shared library layer (solib-svr4) learn about
775 		 this new process, relocate the cloned exec, pull in
776 		 shared libraries, and install the solib event
777 		 breakpoint.  If a "cloned-VM" event was propagated
778 		 better throughout the core, this wouldn't be
779 		 required.  */
780 	      solib_create_inferior_hook (0);
781 	    }
782 
783 	  /* Let the thread_db layer learn about this new process.  */
784 	  check_for_thread_db ();
785 
786 	  do_cleanups (old_chain);
787 	}
788 
789       if (has_vforked)
790 	{
791 	  struct lwp_info *lp;
792 	  struct inferior *parent_inf;
793 
794 	  parent_inf = current_inferior ();
795 
796 	  /* If we detached from the child, then we have to be careful
797 	     to not insert breakpoints in the parent until the child
798 	     is done with the shared memory region.  However, if we're
799 	     staying attached to the child, then we can and should
800 	     insert breakpoints, so that we can debug it.  A
801 	     subsequent child exec or exit is enough to know when does
802 	     the child stops using the parent's address space.  */
803 	  parent_inf->waiting_for_vfork_done = detach_fork;
804 	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
805 
806 	  lp = find_lwp_pid (pid_to_ptid (parent_pid));
807 	  gdb_assert (linux_supports_tracefork_flag >= 0);
808 	  if (linux_supports_tracevforkdone (0))
809 	    {
810   	      if (debug_linux_nat)
811   		fprintf_unfiltered (gdb_stdlog,
812   				    "LCFF: waiting for VFORK_DONE on %d\n",
813   				    parent_pid);
814 
815 	      lp->stopped = 1;
816 	      lp->resumed = 1;
817 
818 	      /* We'll handle the VFORK_DONE event like any other
819 		 event, in target_wait.  */
820 	    }
821 	  else
822 	    {
823 	      /* We can't insert breakpoints until the child has
824 		 finished with the shared memory region.  We need to
825 		 wait until that happens.  Ideal would be to just
826 		 call:
827 		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
828 		 - waitpid (parent_pid, &status, __WALL);
829 		 However, most architectures can't handle a syscall
830 		 being traced on the way out if it wasn't traced on
831 		 the way in.
832 
833 		 We might also think to loop, continuing the child
834 		 until it exits or gets a SIGTRAP.  One problem is
835 		 that the child might call ptrace with PTRACE_TRACEME.
836 
837 		 There's no simple and reliable way to figure out when
838 		 the vforked child will be done with its copy of the
839 		 shared memory.  We could step it out of the syscall,
840 		 two instructions, let it go, and then single-step the
841 		 parent once.  When we have hardware single-step, this
842 		 would work; with software single-step it could still
843 		 be made to work but we'd have to be able to insert
844 		 single-step breakpoints in the child, and we'd have
845 		 to insert -just- the single-step breakpoint in the
846 		 parent.  Very awkward.
847 
848 		 In the end, the best we can do is to make sure it
849 		 runs for a little while.  Hopefully it will be out of
850 		 range of any breakpoints we reinsert.  Usually this
851 		 is only the single-step breakpoint at vfork's return
852 		 point.  */
853 
854   	      if (debug_linux_nat)
855   		fprintf_unfiltered (gdb_stdlog,
856 				    "LCFF: no VFORK_DONE "
857 				    "support, sleeping a bit\n");
858 
859 	      usleep (10000);
860 
861 	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
862 		 and leave it pending.  The next linux_nat_resume call
863 		 will notice a pending event, and bypasses actually
864 		 resuming the inferior.  */
865 	      lp->status = 0;
866 	      lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
867 	      lp->stopped = 0;
868 	      lp->resumed = 1;
869 
870 	      /* If we're in async mode, need to tell the event loop
871 		 there's something here to process.  */
872 	      if (target_can_async_p ())
873 		async_file_mark ();
874 	    }
875 	}
876     }
877   else
878     {
879       struct inferior *parent_inf, *child_inf;
880       struct lwp_info *lp;
881       struct program_space *parent_pspace;
882 
883       if (info_verbose || debug_linux_nat)
884 	{
885 	  target_terminal_ours ();
886 	  if (has_vforked)
887 	    fprintf_filtered (gdb_stdlog,
888 			      _("Attaching after process %d "
889 				"vfork to child process %d.\n"),
890 			      parent_pid, child_pid);
891 	  else
892 	    fprintf_filtered (gdb_stdlog,
893 			      _("Attaching after process %d "
894 				"fork to child process %d.\n"),
895 			      parent_pid, child_pid);
896 	}
897 
898       /* Add the new inferior first, so that the target_detach below
899 	 doesn't unpush the target.  */
900 
901       child_inf = add_inferior (child_pid);
902 
903       parent_inf = current_inferior ();
904       child_inf->attach_flag = parent_inf->attach_flag;
905       copy_terminal_info (child_inf, parent_inf);
906 
907       parent_pspace = parent_inf->pspace;
908 
909       /* If we're vforking, we want to hold on to the parent until the
910 	 child exits or execs.  At child exec or exit time we can
911 	 remove the old breakpoints from the parent and detach or
912 	 resume debugging it.  Otherwise, detach the parent now; we'll
913 	 want to reuse it's program/address spaces, but we can't set
914 	 them to the child before removing breakpoints from the
915 	 parent, otherwise, the breakpoints module could decide to
916 	 remove breakpoints from the wrong process (since they'd be
917 	 assigned to the same address space).  */
918 
919       if (has_vforked)
920 	{
921 	  gdb_assert (child_inf->vfork_parent == NULL);
922 	  gdb_assert (parent_inf->vfork_child == NULL);
923 	  child_inf->vfork_parent = parent_inf;
924 	  child_inf->pending_detach = 0;
925 	  parent_inf->vfork_child = child_inf;
926 	  parent_inf->pending_detach = detach_fork;
927 	  parent_inf->waiting_for_vfork_done = 0;
928 	}
929       else if (detach_fork)
930 	target_detach (NULL, 0);
931 
932       /* Note that the detach above makes PARENT_INF dangling.  */
933 
934       /* Add the child thread to the appropriate lists, and switch to
935 	 this new thread, before cloning the program space, and
936 	 informing the solib layer about this new process.  */
937 
938       inferior_ptid = ptid_build (child_pid, child_pid, 0);
939       add_thread (inferior_ptid);
940       lp = add_lwp (inferior_ptid);
941       lp->stopped = 1;
942       lp->resumed = 1;
943 
944       /* If this is a vfork child, then the address-space is shared
945 	 with the parent.  If we detached from the parent, then we can
946 	 reuse the parent's program/address spaces.  */
947       if (has_vforked || detach_fork)
948 	{
949 	  child_inf->pspace = parent_pspace;
950 	  child_inf->aspace = child_inf->pspace->aspace;
951 	}
952       else
953 	{
954 	  child_inf->aspace = new_address_space ();
955 	  child_inf->pspace = add_program_space (child_inf->aspace);
956 	  child_inf->removable = 1;
957 	  set_current_program_space (child_inf->pspace);
958 	  clone_program_space (child_inf->pspace, parent_pspace);
959 
960 	  /* Let the shared library layer (solib-svr4) learn about
961 	     this new process, relocate the cloned exec, pull in
962 	     shared libraries, and install the solib event breakpoint.
963 	     If a "cloned-VM" event was propagated better throughout
964 	     the core, this wouldn't be required.  */
965 	  solib_create_inferior_hook (0);
966 	}
967 
968       /* Let the thread_db layer learn about this new process.  */
969       check_for_thread_db ();
970     }
971 
972   restore_child_signals_mask (&prev_mask);
973   return 0;
974 }
975 
976 
/* Target hook: arrange for fork events to be reported for PID.
   Returns zero iff the kernel supports ptrace fork event reporting
   for PID; a nonzero return tells the core the catchpoint could not
   be inserted.  */

static int
linux_child_insert_fork_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid))
    return 0;

  return 1;
}
982 
static int
linux_child_remove_fork_catchpoint (int pid)
{
  /* Nothing needs to be undone when a fork catchpoint goes away;
     always report success.  */
  return 0;
}
988 
/* Target hook: arrange for vfork events to be reported for PID.
   Succeeds (returns 0) exactly when the kernel supports ptrace fork
   event reporting for PID.  */

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
994 
static int
linux_child_remove_vfork_catchpoint (int pid)
{
  /* Nothing needs to be undone when a vfork catchpoint goes away;
     always report success.  */
  return 0;
}
1000 
/* Target hook: arrange for exec events to be reported for PID.
   Succeeds (returns 0) exactly when the kernel supports ptrace fork
   event reporting for PID.  */

static int
linux_child_insert_exec_catchpoint (int pid)
{
  int supported = linux_supports_tracefork (pid);

  return supported ? 0 : 1;
}
1006 
static int
linux_child_remove_exec_catchpoint (int pid)
{
  /* Nothing needs to be undone when an exec catchpoint goes away;
     always report success.  */
  return 0;
}
1012 
/* Target hook: enable syscall event reporting for PID.  Returns
   nonzero when the kernel lacks the required ptrace support, zero on
   success.  */

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  int supported = linux_supports_tracesysgood (pid);

  /* On GNU/Linux the NEEDED, ANY_COUNT and TABLE arguments are all
     ignored: we only ever enable syscall reporting (never disable
     it), and we do no kernel-side filtering -- GDB core decides which
     reported syscalls are interesting.  */
  return supported ? 0 : 1;
}
1027 
1028 /* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
1029    are processes sharing the same VM space.  A multi-threaded process
1030    is basically a group of such processes.  However, such a grouping
1031    is almost entirely a user-space issue; the kernel doesn't enforce
1032    such a grouping at all (this might change in the future).  In
1033    general, we'll rely on the threads library (i.e. the GNU/Linux
1034    Threads library) to provide such a grouping.
1035 
1036    It is perfectly well possible to write a multi-threaded application
1037    without the assistance of a threads library, by using the clone
1038    system call directly.  This module should be able to give some
1039    rudimentary support for debugging such applications if developers
1040    specify the CLONE_PTRACE flag in the clone system call, and are
1041    using the Linux kernel 2.4 or above.
1042 
1043    Note that there are some peculiarities in GNU/Linux that affect
1044    this code:
1045 
1046    - In general one should specify the __WCLONE flag to waitpid in
1047      order to make it report events for any of the cloned processes
1048      (and leave it out for the initial process).  However, if a cloned
1049      process has exited the exit status is only reported if the
1050      __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
1051      we cannot use it since GDB must work on older systems too.
1052 
1053    - When a traced, cloned process exits and is waited for by the
1054      debugger, the kernel reassigns it to the original parent and
1055      keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
1056      library doesn't notice this, which leads to the "zombie problem":
     When debugging a multi-threaded process that spawns a lot of
     threads, it will run out of processes, even if the threads exit,
1059      because the "zombies" stay around.  */
1060 
1061 /* List of known LWPs.  */
1062 struct lwp_info *lwp_list;
1063 
1064 
1065 /* Original signal mask.  */
1066 static sigset_t normal_mask;
1067 
1068 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1069    _initialize_linux_nat.  */
1070 static sigset_t suspend_mask;
1071 
/* Signals to block to make sigsuspend work.  */
1073 static sigset_t blocked_mask;
1074 
1075 /* SIGCHLD action.  */
1076 struct sigaction sigchld_action;
1077 
1078 /* Block child signals (SIGCHLD and linux threads signals), and store
1079    the previous mask in PREV_MASK.  */
1080 
1081 static void
1082 block_child_signals (sigset_t *prev_mask)
1083 {
1084   /* Make sure SIGCHLD is blocked.  */
1085   if (!sigismember (&blocked_mask, SIGCHLD))
1086     sigaddset (&blocked_mask, SIGCHLD);
1087 
1088   sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1089 }
1090 
1091 /* Restore child signals mask, previously returned by
1092    block_child_signals.  */
1093 
static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  /* SIG_SETMASK reinstates exactly the mask captured by
     block_child_signals, dropping our temporary blocks.  */
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
1099 
1100 
1101 /* Prototypes for local functions.  */
1102 static int stop_wait_callback (struct lwp_info *lp, void *data);
1103 static int linux_thread_alive (ptid_t ptid);
1104 static char *linux_child_pid_to_exec_file (int pid);
1105 
1106 
1107 /* Convert wait status STATUS to a string.  Used for printing debug
1108    messages only.  */
1109 
1110 static char *
1111 status_to_str (int status)
1112 {
1113   static char buf[64];
1114 
1115   if (WIFSTOPPED (status))
1116     {
1117       if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1118 	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1119 		  strsignal (SIGTRAP));
1120       else
1121 	snprintf (buf, sizeof (buf), "%s (stopped)",
1122 		  strsignal (WSTOPSIG (status)));
1123     }
1124   else if (WIFSIGNALED (status))
1125     snprintf (buf, sizeof (buf), "%s (terminated)",
1126 	      strsignal (WTERMSIG (status)));
1127   else
1128     snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1129 
1130   return buf;
1131 }
1132 
1133 /* Remove all LWPs belong to PID from the lwp list.  */
1134 
1135 static void
1136 purge_lwp_list (int pid)
1137 {
1138   struct lwp_info *lp, *lpprev, *lpnext;
1139 
1140   lpprev = NULL;
1141 
1142   for (lp = lwp_list; lp; lp = lpnext)
1143     {
1144       lpnext = lp->next;
1145 
1146       if (ptid_get_pid (lp->ptid) == pid)
1147 	{
1148 	  if (lp == lwp_list)
1149 	    lwp_list = lp->next;
1150 	  else
1151 	    lpprev->next = lp->next;
1152 
1153 	  xfree (lp);
1154 	}
1155       else
1156 	lpprev = lp;
1157     }
1158 }
1159 
1160 /* Return the number of known LWPs in the tgid given by PID.  */
1161 
1162 static int
1163 num_lwps (int pid)
1164 {
1165   int count = 0;
1166   struct lwp_info *lp;
1167 
1168   for (lp = lwp_list; lp; lp = lp->next)
1169     if (ptid_get_pid (lp->ptid) == pid)
1170       count++;
1171 
1172   return count;
1173 }
1174 
1175 /* Add the LWP specified by PID to the list.  Return a pointer to the
1176    structure describing the new LWP.  The LWP should already be stopped
1177    (with an exception for the very first LWP).  */
1178 
1179 static struct lwp_info *
1180 add_lwp (ptid_t ptid)
1181 {
1182   struct lwp_info *lp;
1183 
1184   gdb_assert (is_lwp (ptid));
1185 
1186   lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1187 
1188   memset (lp, 0, sizeof (struct lwp_info));
1189 
1190   lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1191 
1192   lp->ptid = ptid;
1193   lp->core = -1;
1194 
1195   lp->next = lwp_list;
1196   lwp_list = lp;
1197 
1198   if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1199     linux_nat_new_thread (ptid);
1200 
1201   return lp;
1202 }
1203 
1204 /* Remove the LWP specified by PID from the list.  */
1205 
1206 static void
1207 delete_lwp (ptid_t ptid)
1208 {
1209   struct lwp_info *lp, *lpprev;
1210 
1211   lpprev = NULL;
1212 
1213   for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1214     if (ptid_equal (lp->ptid, ptid))
1215       break;
1216 
1217   if (!lp)
1218     return;
1219 
1220   if (lpprev)
1221     lpprev->next = lp->next;
1222   else
1223     lwp_list = lp->next;
1224 
1225   xfree (lp);
1226 }
1227 
1228 /* Return a pointer to the structure describing the LWP corresponding
1229    to PID.  If no corresponding LWP could be found, return NULL.  */
1230 
1231 static struct lwp_info *
1232 find_lwp_pid (ptid_t ptid)
1233 {
1234   struct lwp_info *lp;
1235   int lwp;
1236 
1237   if (is_lwp (ptid))
1238     lwp = GET_LWP (ptid);
1239   else
1240     lwp = GET_PID (ptid);
1241 
1242   for (lp = lwp_list; lp; lp = lp->next)
1243     if (lwp == GET_LWP (lp->ptid))
1244       return lp;
1245 
1246   return NULL;
1247 }
1248 
1249 /* Call CALLBACK with its second argument set to DATA for every LWP in
1250    the list.  If CALLBACK returns 1 for a particular LWP, return a
1251    pointer to the structure describing that LWP immediately.
1252    Otherwise return NULL.  */
1253 
1254 struct lwp_info *
1255 iterate_over_lwps (ptid_t filter,
1256 		   int (*callback) (struct lwp_info *, void *),
1257 		   void *data)
1258 {
1259   struct lwp_info *lp, *lpnext;
1260 
1261   for (lp = lwp_list; lp; lp = lpnext)
1262     {
1263       lpnext = lp->next;
1264 
1265       if (ptid_match (lp->ptid, filter))
1266 	{
1267 	  if ((*callback) (lp, data))
1268 	    return lp;
1269 	}
1270     }
1271 
1272   return NULL;
1273 }
1274 
1275 /* Update our internal state when changing from one checkpoint to
1276    another indicated by NEW_PTID.  We can only switch single-threaded
1277    applications, so we only create one new LWP, and the previous list
1278    is discarded.  */
1279 
void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Discard every LWP of the current inferior first; checkpoint
     switching only supports single-threaded programs, so none of the
     old entries carry over.  */
  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
1300 
1301 /* Handle the exit of a single thread LP.  */
1302 
1303 static void
1304 exit_lwp (struct lwp_info *lp)
1305 {
1306   struct thread_info *th = find_thread_ptid (lp->ptid);
1307 
1308   if (th)
1309     {
1310       if (print_thread_events)
1311 	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1312 
1313       delete_thread (lp->ptid);
1314     }
1315 
1316   delete_lwp (lp->ptid);
1317 }
1318 
1319 /* Return an lwp's tgid, found in `/proc/PID/status'.  */
1320 
int
linux_proc_get_tgid (int lwpid)
{
  char buf[100];
  FILE *status_file;
  int tgid = -1;

  /* BUF first holds the path, then is reused for line reads.  */
  snprintf (buf, sizeof (buf), "/proc/%d/status", lwpid);
  status_file = fopen (buf, "r");
  if (status_file == NULL)
    return -1;

  /* Scan for the "Tgid:" line; the number after the tag is the
     thread group id.  */
  while (fgets (buf, sizeof (buf), status_file) != NULL)
    {
      if (strncmp (buf, "Tgid:", 5) == 0)
	{
	  tgid = strtoul (buf + 5, NULL, 10);
	  break;
	}
    }

  fclose (status_file);
  return tgid;
}
1346 
1347 /* Detect `T (stopped)' in `/proc/PID/status'.
1348    Other states including `T (tracing stop)' are reported as false.  */
1349 
static int
pid_is_stopped (pid_t pid)
{
  char buf[100];
  FILE *status_file;
  int stopped = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file == NULL)
    return 0;

  /* Locate the "State:" line, if there is one.  */
  while (fgets (buf, sizeof (buf), status_file) != NULL)
    {
      if (strncmp (buf, "State:", 6) == 0)
	{
	  /* Only a job-control stop reads `T (stopped)'; a ptrace
	     stop reads `T (tracing stop)' and must not match.  */
	  if (strstr (buf, "T (stopped)") != NULL)
	    stopped = 1;
	  break;
	}
    }

  fclose (status_file);
  return stopped;
}
1377 
1378 /* Wait for the LWP specified by LP, which we have just attached to.
1379    Returns a wait status for that LWP, to cache.  */
1380 
static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
      /* NOTE(review): if this second waitpid also fails, STATUS is
	 used below uninitialized; presumably unreachable in practice
	 -- confirm.  */
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* The LWP stopped for some other signal than the SIGSTOP we
	 expected; tell the caller so it can be re-delivered later.  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
1450 
1451 /* Attach to the LWP specified by PID.  Return 0 if successful or -1
1452    if the new LWP could not be attached.  */
1453 
1454 int
1455 lin_lwp_attach_lwp (ptid_t ptid)
1456 {
1457   struct lwp_info *lp;
1458   sigset_t prev_mask;
1459 
1460   gdb_assert (is_lwp (ptid));
1461 
1462   block_child_signals (&prev_mask);
1463 
1464   lp = find_lwp_pid (ptid);
1465 
1466   /* We assume that we're already attached to any LWP that has an id
1467      equal to the overall process id, and to any LWP that is already
1468      in our list of LWPs.  If we're not seeing exit events from threads
1469      and we've had PID wraparound since we last tried to stop all threads,
1470      this assumption might be wrong; fortunately, this is very unlikely
1471      to happen.  */
1472   if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
1473     {
1474       int status, cloned = 0, signalled = 0;
1475 
1476       if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
1477 	{
1478 	  /* If we fail to attach to the thread, issue a warning,
1479 	     but continue.  One way this can happen is if thread
1480 	     creation is interrupted; as of Linux kernel 2.6.19, a
1481 	     bug may place threads in the thread list and then fail
1482 	     to create them.  */
1483 	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1484 		   safe_strerror (errno));
1485 	  restore_child_signals_mask (&prev_mask);
1486 	  return -1;
1487 	}
1488 
1489       if (debug_linux_nat)
1490 	fprintf_unfiltered (gdb_stdlog,
1491 			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1492 			    target_pid_to_str (ptid));
1493 
1494       status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1495       if (!WIFSTOPPED (status))
1496 	return -1;
1497 
1498       lp = add_lwp (ptid);
1499       lp->stopped = 1;
1500       lp->cloned = cloned;
1501       lp->signalled = signalled;
1502       if (WSTOPSIG (status) != SIGSTOP)
1503 	{
1504 	  lp->resumed = 1;
1505 	  lp->status = status;
1506 	}
1507 
1508       target_post_attach (GET_LWP (lp->ptid));
1509 
1510       if (debug_linux_nat)
1511 	{
1512 	  fprintf_unfiltered (gdb_stdlog,
1513 			      "LLAL: waitpid %s received %s\n",
1514 			      target_pid_to_str (ptid),
1515 			      status_to_str (status));
1516 	}
1517     }
1518   else
1519     {
1520       /* We assume that the LWP representing the original process is
1521          already stopped.  Mark it as stopped in the data structure
1522          that the GNU/linux ptrace layer uses to keep track of
1523          threads.  Note that this won't have already been done since
1524          the main thread will have, we assume, been stopped by an
1525          attach from a different layer.  */
1526       if (lp == NULL)
1527 	lp = add_lwp (ptid);
1528       lp->stopped = 1;
1529     }
1530 
1531   restore_child_signals_mask (&prev_mask);
1532   return 0;
1533 }
1534 
/* Target hook: start a new inferior running EXEC_FILE with arguments
   ALLARGS and environment ENV, temporarily disabling address space
   randomization around the underlying fork/exec when requested.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* personality (0xffffffff) queries the current personality
	 without changing it.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      /* Re-query to verify the ADDR_NO_RANDOMIZE bit actually stuck.
	 NOTE(review): the errno test here may also reflect a failure
	 of the set call just above, not only the initial query.  */
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore the original personality so later GDB-side code runs
	 unaffected.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}
1577 
/* Target hook: attach to the process given in ARGS, decorate the main
   thread with LWP information, and cache the initial stop status so
   it can be reported by a later wait.  */

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* The lower ptrace layer performs the actual attach.  */
  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process exited or was killed instead of stopping; mourn
	 it and report why the attach failed.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum target_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = target_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 target_signal_to_name (signo),
		 target_signal_to_string (signo));
	}

      /* A waitpid status must be stopped, exited or signalled; any
	 other value is a bug somewhere.  */
      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1644 
/* Work out which signal, if any, is pending for LP and would need
   re-delivering on resume/detach.  Stores 0 in *STATUS when nothing
   needs re-delivering, otherwise a W_STOPCODE for the pending signal.
   Always returns 0, so it is usable as an iterate_over_lwps
   callback.  */

static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum target_signal signo = TARGET_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = target_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      /* In non-stop mode each stopped thread remembers its own last
	 reported signal.  */
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      /* In all-stop mode only the thread that reported the last
	 event has a meaningful stop_signal.  */
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == TARGET_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      /* The user asked for this signal not to be passed to the
	 program, so do not re-deliver it.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (target_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }

  return 0;
}
1728 
/* iterate_over_lwps callback: detach from LP, re-delivering any
   pending signal, except when LP is the main LWP (lwp id equal to the
   process id), which is detached later by the caller.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC:  Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      /* WSTOPSIG (0) is 0, so an empty STATUS re-delivers nothing.  */
      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1777 
/* Target hook: detach from the inferior.  Stops every LWP, detaches
   the non-main ones (re-delivering their pending signals), then lets
   the lower layer detach the main LWP.  */

static void
linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = GET_PID (inferior_ptid);

  /* Drop out of async mode; the rest of the detach is synchronous.  */
  if (target_can_async_p ())
    linux_nat_async (NULL, 0);

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);

      if (non_stop && target_can_async_p ())
 	target_async (inferior_event_handler, 0);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1836 
1837 /* Resume LP.  */
1838 
1839 static int
1840 resume_callback (struct lwp_info *lp, void *data)
1841 {
1842   struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1843 
1844   if (lp->stopped && inf->vfork_child != NULL)
1845     {
1846       if (debug_linux_nat)
1847 	fprintf_unfiltered (gdb_stdlog,
1848 			    "RC: Not resuming %s (vfork parent)\n",
1849 			    target_pid_to_str (lp->ptid));
1850     }
1851   else if (lp->stopped && lp->status == 0)
1852     {
1853       if (debug_linux_nat)
1854 	fprintf_unfiltered (gdb_stdlog,
1855 			    "RC:  PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1856 			    target_pid_to_str (lp->ptid));
1857 
1858       linux_ops->to_resume (linux_ops,
1859 			    pid_to_ptid (GET_LWP (lp->ptid)),
1860 			    0, TARGET_SIGNAL_0);
1861       if (debug_linux_nat)
1862 	fprintf_unfiltered (gdb_stdlog,
1863 			    "RC:  PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1864 			    target_pid_to_str (lp->ptid));
1865       lp->stopped = 0;
1866       lp->step = 0;
1867       memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1868       lp->stopped_by_watchpoint = 0;
1869     }
1870   else if (lp->stopped && debug_linux_nat)
1871     fprintf_unfiltered (gdb_stdlog,
1872 			"RC: Not resuming sibling %s (has pending)\n",
1873 			target_pid_to_str (lp->ptid));
1874   else if (debug_linux_nat)
1875     fprintf_unfiltered (gdb_stdlog,
1876 			"RC: Not resuming sibling %s (not stopped)\n",
1877 			target_pid_to_str (lp->ptid));
1878 
1879   return 0;
1880 }
1881 
/* iterate_over_lwps callback: clear LP's resumed flag.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}
1888 
/* iterate_over_lwps callback: set LP's resumed flag.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}
1895 
/* Target resume method for GNU/Linux.  Resume (or single-step, if
   STEP) the LWPs matching PTID, delivering SIGNO to the event thread.
   A minus_one_ptid or bare-process PTID means the siblings are
   resumed too (via resume_callback).  If the event thread already has
   an event pending, nothing is actually resumed: the pending event is
   left for linux_nat_wait to report.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum target_signal signo)
{
  sigset_t prev_mask;
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != TARGET_SIGNAL_0
			 ? strsignal (target_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* Keep SIGCHLD blocked while we fiddle with LWP state.  */
  block_child_signals (&prev_mask);

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      enum target_signal saved_signo;
      struct inferior *inf;

      inf = find_inferior_pid (ptid_get_pid (lp->ptid));
      gdb_assert (inf);
      saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      /* Defer to common code if we're gaining control of the
	 inferior.  */
      if (inf->control.stop_soon == NO_STOP_QUIETLY
	  && signal_stop_state (saved_signo) == 0
	  && signal_print_state (saved_signo) == 0
	  && signal_pass_state (saved_signo) == 1)
	{
	  /* The pending signal is one the user wants passed through
	     silently, so consume it here and deliver it on resume
	     rather than short-circuiting below.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);
	  signo = saved_signo;
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      /* Don't resume anything; leave the pending event for
	 linux_nat_wait, and make sure the event loop gets poked in
	 async mode.  */
      restore_child_signals_mask (&prev_mask);
      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_many)
    iterate_over_lwps (ptid, resume_callback, NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  linux_ops->to_resume (linux_ops, ptid, step, signo);
  /* Per-stop state is no longer valid once the LWP runs again.  */
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));
  lp->stopped_by_watchpoint = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != TARGET_SIGNAL_0
			 ? strsignal (target_signal_to_host (signo)) : "0"));

  restore_child_signals_mask (&prev_mask);
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
2017 
2018 /* Send a signal to an LWP.  */
2019 
static int
kill_lwp (int lwpid, int signo)
{
  /* Prefer the tkill syscall so the signal is delivered to the exact
     LWP even under nptl threading; fall back to kill(2) on kernels
     without tkill.  */
#ifdef HAVE_TKILL_SYSCALL
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;

      /* The kernel lacks tkill; remember that and use kill from now
	 on.  */
      tkill_unavailable = 1;
    }
#endif

  return kill (lwpid, signo);
}
2045 
2046 /* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
2047    event, check if the core is interested in it: if not, ignore the
2048    event, and keep waiting; otherwise, we need to toggle the LWP's
2049    syscall entry/exit status, since the ptrace event itself doesn't
2050    indicate it, and report the trap to higher layers.  */
2051 
static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  /* Ask the arch which syscall this stop corresponds to (this may
     read registers; see the registers_changed note below).  */
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    GET_LWP (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      /* Event consumed; the caller should wait again.  */
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				GET_LWP (lp->ptid));
	  /* Zero: report the trap to the core.  */
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    GET_LWP (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    GET_LWP (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  /* Note that gdbarch_get_syscall_number may access registers, hence
     fill a regcache.  */
  registers_changed ();
  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			lp->step, TARGET_SIGNAL_0);
  /* Event consumed; the caller should wait again.  */
  return 1;
}
2174 
2175 /* Handle a GNU/Linux extended wait response.  If we see a clone
2176    event, we need to add the new LWP to our list (and not report the
2177    trap to higher layers).  This function returns non-zero if the
2178    event should be ignored and we should wait again.  If STOPPING is
2179    true, the new LWP remains stopped, otherwise it is continued.  */
2180 
static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  /* The PTRACE_EVENT_* code is encoded in bits 16 and up of the
     waitpid status.  */
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* PTRACE_GETEVENTMSG yields the pid of the new child.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (new_pid);

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
	  linux_enable_event_reporting (pid_to_ptid (new_pid));

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* A clone event: record the new LWP ourselves and keep
	     waiting rather than reporting it to the core.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    /* The expected SIGSTOP: consume it, nothing to
	       forward.  */
	    status = 0;

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (GET_LWP (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		}
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      enum target_signal signo;

	      new_lp->stopped = 0;
	      new_lp->resumed = 1;

	      /* Forward any non-SIGSTOP signal collected above.  */
	      signo = (status
		       ? target_signal_from_host (WSTOPSIG (status))
		       : TARGET_SIGNAL_0);

	      linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
				    0, signo);
	    }
	  else
	    {
	      if (status != 0)
		{
		  /* We created NEW_LP so it cannot yet contain STATUS.  */
		  gdb_assert (new_lp->status == 0);

		  /* Save the wait status to report later.  */
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: waitpid of new LWP %ld, "
					"saving status %s\n",
					(long) GET_LWP (new_lp->ptid),
					status_to_str (status));
		  new_lp->status = status;
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %ld, resuming\n",
				GET_LWP (lp->ptid));
	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
				0, TARGET_SIGNAL_0);

	  /* Non-zero: the event was consumed, keep waiting.  */
	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    GET_LWP (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				GET_LWP (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    GET_LWP (lp->ptid));
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
2386 
2387 /* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
2388    exited.  */
2389 
2390 static int
2391 wait_lwp (struct lwp_info *lp)
2392 {
2393   pid_t pid;
2394   int status;
2395   int thread_dead = 0;
2396 
2397   gdb_assert (!lp->stopped);
2398   gdb_assert (lp->status == 0);
2399 
2400   pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
2401   if (pid == -1 && errno == ECHILD)
2402     {
2403       pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
2404       if (pid == -1 && errno == ECHILD)
2405 	{
2406 	  /* The thread has previously exited.  We need to delete it
2407 	     now because, for some vendor 2.4 kernels with NPTL
2408 	     support backported, there won't be an exit event unless
2409 	     it is the main thread.  2.6 kernels will report an exit
2410 	     event for each thread that exits, as expected.  */
2411 	  thread_dead = 1;
2412 	  if (debug_linux_nat)
2413 	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2414 				target_pid_to_str (lp->ptid));
2415 	}
2416     }
2417 
2418   if (!thread_dead)
2419     {
2420       gdb_assert (pid == GET_LWP (lp->ptid));
2421 
2422       if (debug_linux_nat)
2423 	{
2424 	  fprintf_unfiltered (gdb_stdlog,
2425 			      "WL: waitpid %s received %s\n",
2426 			      target_pid_to_str (lp->ptid),
2427 			      status_to_str (status));
2428 	}
2429     }
2430 
2431   /* Check if the thread has exited.  */
2432   if (WIFEXITED (status) || WIFSIGNALED (status))
2433     {
2434       thread_dead = 1;
2435       if (debug_linux_nat)
2436 	fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2437 			    target_pid_to_str (lp->ptid));
2438     }
2439 
2440   if (thread_dead)
2441     {
2442       exit_lwp (lp);
2443       return 0;
2444     }
2445 
2446   gdb_assert (WIFSTOPPED (status));
2447 
2448   /* Handle GNU/Linux's syscall SIGTRAPs.  */
2449   if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2450     {
2451       /* No longer need the sysgood bit.  The ptrace event ends up
2452 	 recorded in lp->waitstatus if we care for it.  We can carry
2453 	 on handling the event like a regular SIGTRAP from here
2454 	 on.  */
2455       status = W_STOPCODE (SIGTRAP);
2456       if (linux_handle_syscall_trap (lp, 1))
2457 	return wait_lwp (lp);
2458     }
2459 
2460   /* Handle GNU/Linux's extended waitstatus for trace events.  */
2461   if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2462     {
2463       if (debug_linux_nat)
2464 	fprintf_unfiltered (gdb_stdlog,
2465 			    "WL: Handling extended status 0x%06x\n",
2466 			    status);
2467       if (linux_handle_extended_wait (lp, status, 1))
2468 	return wait_lwp (lp);
2469     }
2470 
2471   return status;
2472 }
2473 
2474 /* Save the most recent siginfo for LP.  This is currently only called
2475    for SIGTRAP; some ports use the si_addr field for
2476    target_stopped_data_address.  In the future, it may also be used to
2477    restore the siginfo of requeued signals.  */
2478 
static void
save_siginfo (struct lwp_info *lp)
{
  /* ptrace reports failure via errno; clear it first so the check
     below is meaningful.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
	  (PTRACE_TYPE_ARG3) 0, &lp->siginfo);

  if (errno != 0)
    /* PTRACE_GETSIGINFO failed; don't leave stale data behind.  */
    memset (&lp->siginfo, 0, sizeof (lp->siginfo));
}
2489 
2490 /* Send a SIGSTOP to LP.  */
2491 
2492 static int
2493 stop_callback (struct lwp_info *lp, void *data)
2494 {
2495   if (!lp->stopped && !lp->signalled)
2496     {
2497       int ret;
2498 
2499       if (debug_linux_nat)
2500 	{
2501 	  fprintf_unfiltered (gdb_stdlog,
2502 			      "SC:  kill %s **<SIGSTOP>**\n",
2503 			      target_pid_to_str (lp->ptid));
2504 	}
2505       errno = 0;
2506       ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2507       if (debug_linux_nat)
2508 	{
2509 	  fprintf_unfiltered (gdb_stdlog,
2510 			      "SC:  lwp kill %d %s\n",
2511 			      ret,
2512 			      errno ? safe_strerror (errno) : "ERRNO-OK");
2513 	}
2514 
2515       lp->signalled = 1;
2516       gdb_assert (lp->status == 0);
2517     }
2518 
2519   return 0;
2520 }
2521 
2522 /* Return non-zero if LWP PID has a pending SIGINT.  */
2523 
2524 static int
2525 linux_nat_has_pending_sigint (int pid)
2526 {
2527   sigset_t pending, blocked, ignored;
2528 
2529   linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2530 
2531   if (sigismember (&pending, SIGINT)
2532       && !sigismember (&ignored, SIGINT))
2533     return 1;
2534 
2535   return 0;
2536 }
2537 
2538 /* Set a flag in LP indicating that we should ignore its next SIGINT.  */
2539 
2540 static int
2541 set_ignore_sigint (struct lwp_info *lp, void *data)
2542 {
2543   /* If a thread has a pending SIGINT, consume it; otherwise, set a
2544      flag to consume the next one.  */
2545   if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2546       && WSTOPSIG (lp->status) == SIGINT)
2547     lp->status = 0;
2548   else
2549     lp->ignore_sigint = 1;
2550 
2551   return 0;
2552 }
2553 
2554 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2555    This function is called after we know the LWP has stopped; if the LWP
2556    stopped before the expected SIGINT was delivered, then it will never have
2557    arrived.  Also, if the signal was delivered to a shared queue and consumed
2558    by a different thread, it will never be delivered to this LWP.  */
2559 
2560 static void
2561 maybe_clear_ignore_sigint (struct lwp_info *lp)
2562 {
2563   if (!lp->ignore_sigint)
2564     return;
2565 
2566   if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2567     {
2568       if (debug_linux_nat)
2569 	fprintf_unfiltered (gdb_stdlog,
2570 			    "MCIS: Clearing bogus flag for %s\n",
2571 			    target_pid_to_str (lp->ptid));
2572       lp->ignore_sigint = 0;
2573     }
2574 }
2575 
2576 /* Fetch the possible triggered data watchpoint info and store it in
2577    LP.
2578 
2579    On some archs, like x86, that use debug registers to set
2580    watchpoints, it's possible that the way to know which watched
2581    address trapped, is to check the register that is used to select
2582    which address to watch.  Problem is, between setting the watchpoint
2583    and reading back which data address trapped, the user may change
2584    the set of watchpoints, and, as a consequence, GDB changes the
2585    debug registers in the inferior.  To avoid reading back a stale
2586    stopped-data-address when that happens, we cache in LP the fact
2587    that a watchpoint trapped, and the corresponding data address, as
2588    soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
2589    registers meanwhile, we have the cached data we can rely on.  */
2590 
2591 static void
2592 save_sigtrap (struct lwp_info *lp)
2593 {
2594   struct cleanup *old_chain;
2595 
2596   if (linux_ops->to_stopped_by_watchpoint == NULL)
2597     {
2598       lp->stopped_by_watchpoint = 0;
2599       return;
2600     }
2601 
2602   old_chain = save_inferior_ptid ();
2603   inferior_ptid = lp->ptid;
2604 
2605   lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2606 
2607   if (lp->stopped_by_watchpoint)
2608     {
2609       if (linux_ops->to_stopped_data_address != NULL)
2610 	lp->stopped_data_address_p =
2611 	  linux_ops->to_stopped_data_address (&current_target,
2612 					      &lp->stopped_data_address);
2613       else
2614 	lp->stopped_data_address_p = 0;
2615     }
2616 
2617   do_cleanups (old_chain);
2618 }
2619 
2620 /* See save_sigtrap.  */
2621 
2622 static int
2623 linux_nat_stopped_by_watchpoint (void)
2624 {
2625   struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2626 
2627   gdb_assert (lp != NULL);
2628 
2629   return lp->stopped_by_watchpoint;
2630 }
2631 
2632 static int
2633 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2634 {
2635   struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2636 
2637   gdb_assert (lp != NULL);
2638 
2639   *addr_p = lp->stopped_data_address;
2640 
2641   return lp->stopped_data_address_p;
2642 }
2643 
2644 /* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */
2645 
static int
sigtrap_is_event (int status)
{
  /* A stop by SIGTRAP is the default breakpoint/watchpoint event.  */
  if (!WIFSTOPPED (status))
    return 0;
  return WSTOPSIG (status) == SIGTRAP;
}
2651 
/* SIGTRAP-like events recognizer.  Defaults to sigtrap_is_event;
   callers may install a different recognizer via
   linux_nat_set_status_is_event.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2655 
2656 /* Check for SIGTRAP-like events in LP.  */
2657 
2658 static int
2659 linux_nat_lp_status_is_event (struct lwp_info *lp)
2660 {
2661   /* We check for lp->waitstatus in addition to lp->status, because we can
2662      have pending process exits recorded in lp->status
2663      and W_EXITCODE(0,0) == 0.  We should probably have an additional
2664      lp->status_p flag.  */
2665 
2666   return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2667 	  && linux_nat_status_is_event (lp->status));
2668 }
2669 
2670 /* Set alternative SIGTRAP-like events recognizer.  If
2671    breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2672    applied.  */
2673 
2674 void
2675 linux_nat_set_status_is_event (struct target_ops *t,
2676 			       int (*status_is_event) (int status))
2677 {
2678   linux_nat_status_is_event = status_is_event;
2679 }
2680 
2681 /* Wait until LP is stopped.  */
2682 
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* Zero means the LWP exited and was already cleaned up.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* This is the SIGINT we were told to swallow; discard it
	     and keep waiting for the SIGSTOP.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) "
				"(discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  if (linux_nat_status_is_event (status))
	    {
	      /* If a LWP other than the LWP that we're reporting an
	         event for has hit a GDB breakpoint (as opposed to
	         some random trap signal), then just arrange for it to
	         hit it again later.  We don't keep the SIGTRAP status
	         and don't forward the SIGTRAP signal to the LWP.  We
	         will handle the current event, eventually we will
	         resume all LWPs, and this one will get its breakpoint
	         trap again.

	         If we do not do this, then we run the risk that the
	         user will delete or disable the breakpoint, but the
	         thread will have already tripped on it.  */

	      /* Save the trap's siginfo in case we need it later.  */
	      save_siginfo (lp);

	      save_sigtrap (lp);

	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "PTRACE_CONT %s, 0, 0 (%s)\n",
				      target_pid_to_str (lp->ptid),
				      errno ? safe_strerror (errno) : "OK");

		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Candidate SIGTRAP event in %s\n",
				      target_pid_to_str (lp->ptid));
		}
	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, NULL);

	      /* Hold the SIGTRAP for handling by linux_nat_wait.  If
		 there's another event, throw it back into the
		 queue.  */
	      if (lp->status)
		{
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"SWC: kill %s, %s\n",
					target_pid_to_str (lp->ptid),
					status_to_str ((int) status));
		  /* NOTE(review): the message above prints STATUS,
		     but the signal re-queued here comes from
		     LP->status (the extra event collected by the
		     recursive call) — confirm the message is
		     intended.  */
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
		}

	      /* Save the sigtrap event.  */
	      lp->status = status;
	      return 0;
	    }
	  else
	    {
	      /* The thread was stopped with a signal other than
	         SIGSTOP, and didn't accidentally trip a breakpoint.  */

	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Pending event %s in %s\n",
				      status_to_str ((int) status),
				      target_pid_to_str (lp->ptid));
		}
	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
				    target_pid_to_str (lp->ptid),
				    errno ? safe_strerror (errno) : "OK");

	      /* Hold this event/waitstatus while we check to see if
	         there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, NULL);

	      /* If the lp->status field is still empty, use it to
		 hold this event.  If not, then this event must be
		 returned to the event queue of the LWP.  */
	      if (lp->status)
		{
		  if (debug_linux_nat)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "SWC: kill %s, %s\n",
					  target_pid_to_str (lp->ptid),
					  status_to_str ((int) status));
		    }
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		}
	      else
		lp->status = status;
	      return 0;
	    }
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */
	  lp->stopped = 1;
	  lp->signalled = 0;
	}
    }

  return 0;
}
2832 
2833 /* Return non-zero if LP has a wait status pending.  */
2834 
2835 static int
2836 status_callback (struct lwp_info *lp, void *data)
2837 {
2838   /* Only report a pending wait status if we pretend that this has
2839      indeed been resumed.  */
2840   if (!lp->resumed)
2841     return 0;
2842 
2843   if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2844     {
2845       /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2846 	 or a pending process exit.  Note that `W_EXITCODE(0,0) ==
2847 	 0', so a clean process exit can not be stored pending in
2848 	 lp->status, it is indistinguishable from
2849 	 no-pending-status.  */
2850       return 1;
2851     }
2852 
2853   if (lp->status != 0)
2854     return 1;
2855 
2856   return 0;
2857 }
2858 
2859 /* Return non-zero if LP isn't stopped.  */
2860 
2861 static int
2862 running_callback (struct lwp_info *lp, void *data)
2863 {
2864   return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2865 }
2866 
2867 /* Count the LWP's that have had events.  */
2868 
2869 static int
2870 count_events_callback (struct lwp_info *lp, void *data)
2871 {
2872   int *count = data;
2873 
2874   gdb_assert (count != NULL);
2875 
2876   /* Count only resumed LWPs that have a SIGTRAP event pending.  */
2877   if (lp->resumed && linux_nat_lp_status_is_event (lp))
2878     (*count)++;
2879 
2880   return 0;
2881 }
2882 
2883 /* Select the LWP (if any) that is currently being single-stepped.  */
2884 
2885 static int
2886 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2887 {
2888   if (lp->step && lp->status != 0)
2889     return 1;
2890   else
2891     return 0;
2892 }
2893 
2894 /* Select the Nth LWP that has had a SIGTRAP event.  */
2895 
2896 static int
2897 select_event_lwp_callback (struct lwp_info *lp, void *data)
2898 {
2899   int *selector = data;
2900 
2901   gdb_assert (selector != NULL);
2902 
2903   /* Select only resumed LWPs that have a SIGTRAP event pending.  */
2904   if (lp->resumed && linux_nat_lp_status_is_event (lp))
2905     if ((*selector)-- == 0)
2906       return 1;
2907 
2908   return 0;
2909 }
2910 
/* If LP stopped at a breakpoint that GDB inserted, undo the stop so
   that the breakpoint is hit again when LP is eventually resumed.
   Return non-zero if an inserted breakpoint was found at the
   (decr_pc-adjusted) PC, zero otherwise.  */
static int
cancel_breakpoint (struct lwp_info *lp)
{
  /* Arrange for a breakpoint to be hit again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     this LWP, and this breakpoint will trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  struct regcache *regcache = get_thread_regcache (lp->ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR pc;

  /* On architectures where a breakpoint trap reports the address past
     the breakpoint instruction, back the PC up to the breakpoint's
     address before checking for an inserted breakpoint there.  */
  pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "CB: Push back breakpoint for %s\n",
			    target_pid_to_str (lp->ptid));

      /* Back up the PC if necessary.  */
      if (gdbarch_decr_pc_after_break (gdbarch))
	regcache_write_pc (regcache, pc);

      return 1;
    }
  return 0;
}
2943 
2944 static int
2945 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2946 {
2947   struct lwp_info *event_lp = data;
2948 
2949   /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
2950   if (lp == event_lp)
2951     return 0;
2952 
2953   /* If a LWP other than the LWP that we're reporting an event for has
2954      hit a GDB breakpoint (as opposed to some random trap signal),
2955      then just arrange for it to hit it again later.  We don't keep
2956      the SIGTRAP status and don't forward the SIGTRAP signal to the
2957      LWP.  We will handle the current event, eventually we will resume
2958      all LWPs, and this one will get its breakpoint trap again.
2959 
2960      If we do not do this, then we run the risk that the user will
2961      delete or disable the breakpoint, but the LWP will have already
2962      tripped on it.  */
2963 
2964   if (linux_nat_lp_status_is_event (lp)
2965       && cancel_breakpoint (lp))
2966     /* Throw away the SIGTRAP.  */
2967     lp->status = 0;
2968 
2969   return 0;
2970 }
2971 
2972 /* Select one LWP out of those that have events pending.  */
2973 
static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP, so it is a
     candidate like any other and its event isn't lost if another LWP
     is selected below.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (filter,
				select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Select single-step %s\n",
			    target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  Random selection prevents
         starvation of any one LWP's events.  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (filter, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d SIGTRAP events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (filter,
				    select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP; it has been handed back
     to the caller in *STATUS.  */
  (*orig_lp)->status = 0;
}
3026 
3027 /* Return non-zero if LP has been resumed.  */
3028 
static int
resumed_callback (struct lwp_info *lp, void *data)
{
  /* DATA is unused; match any LWP the core considers resumed.  */
  return lp->resumed;
}
3034 
3035 /* Stop an active thread, verify it still exists, then resume it.  */
3036 
3037 static int
3038 stop_and_resume_callback (struct lwp_info *lp, void *data)
3039 {
3040   struct lwp_info *ptr;
3041 
3042   if (!lp->stopped && !lp->signalled)
3043     {
3044       stop_callback (lp, NULL);
3045       stop_wait_callback (lp, NULL);
3046       /* Resume if the lwp still exists.  */
3047       for (ptr = lwp_list; ptr; ptr = ptr->next)
3048 	if (lp == ptr)
3049 	  {
3050 	    resume_callback (lp, NULL);
3051 	    resume_set_callback (lp, NULL);
3052 	  }
3053     }
3054   return 0;
3055 }
3056 
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.  LWPID and
   STATUS come straight from waitpid; OPTIONS holds the waitpid
   options that were in effect (used to detect cloned LWPs via
   __WCLONE).  */
static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int options)
{
  struct lwp_info *lp;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.  */
  if (WIFSTOPPED (status) && !lp)
    {
      linux_record_stopped_pid (lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* NOTE drow/2003-06-17: This code seems to be meant for debugging
     CLONE_PTRACE processes which do not use the thread library -
     otherwise we wouldn't find the new LWP this way.  That doesn't
     currently work, and the following code is currently unreachable
     due to the two blocks above.  If it's fixed some day, this code
     should be broken out into a function so that we can also pick up
     LWPs from the new interface.  */
  if (!lp)
    {
      lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
      if (options & __WCLONE)
	lp->cloned = 1;

      gdb_assert (WIFSTOPPED (status)
		  && WSTOPSIG (status) == SIGSTOP);
      lp->signalled = 1;

      if (!in_thread_list (inferior_ptid))
	{
	  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
				     GET_PID (inferior_ptid));
	  add_thread (inferior_ptid);
	}

      add_thread (lp->ptid);
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  The
     extended event code lives in the bits above the regular stop
     status (status >> 16).  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  if (linux_nat_status_is_event (status))
    {
      /* Save the trap's siginfo in case we need it later.  */
      save_siginfo (lp);

      save_sigtrap (lp);
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status))
      && num_lwps (GET_PID (lp->ptid)) > 1)
    {
      /* If this is the main thread, we must stop all threads and verify
	 if they are still alive.  This is because in the nptl thread model
	 on Linux 2.4, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
	{
	  lp->stopped = 1;
	  iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
			     stop_and_resume_callback, NULL);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      if (num_lwps (GET_PID (lp->ptid)) > 1)
       {
	 /* If there is at least one more LWP, then the exit signal
	    was not the end of the debugged application and should be
	    ignored.  */
	 exit_lwp (lp);
	 return NULL;
       }
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGSTOP.  */
      lp->signalled = 0;

      registers_changed ();

      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      registers_changed ();
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  return lp;
}
3269 
/* Worker for linux_nat_wait: wait (possibly non-blocking, per
   TARGET_OPTIONS & TARGET_WNOHANG) for an event in the LWPs matching
   PTID, fill in *OURSTATUS, and return the ptid of the LWP the event
   belongs to (or minus_one_ptid with TARGET_WAITKIND_IGNORE if there
   is nothing to report yet).  */
static ptid_t
linux_nat_wait_1 (struct target_ops *ops,
		  ptid_t ptid, struct target_waitstatus *ourstatus,
		  int target_options)
{
  static sigset_t prev_mask;
  struct lwp_info *lp = NULL;
  int options = 0;
  int status = 0;
  pid_t pid;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid_is_pid (inferior_ptid))
    {
      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (inferior_ptid,
			  BUILD_LWP (GET_PID (inferior_ptid),
				     GET_PID (inferior_ptid)));

      lp = add_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked.  */
  block_child_signals (&prev_mask);

  if (ptid_equal (ptid, minus_one_ptid))
    pid = -1;
  else if (ptid_is_pid (ptid))
    /* A request to wait for a specific tgid.  This is not possible
       with waitpid, so instead, we wait for any child, and leave
       children we're not interested in right now with a pending
       status to report later.  */
    pid = -1;
  else
    pid = GET_LWP (ptid);

  /* We come back here whenever an event was consumed without being
     reported to the core (e.g. a pass-through signal below).  */
retry:
  lp = NULL;
  status = 0;

  /* Make sure that of those LWPs we want to get an event from, there
     is at least one LWP that has been resumed.  If there's none, just
     bail out.  The core may just be flushing asynchronously all
     events.  */
  if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
    {
      ourstatus->kind = TARGET_WAITKIND_IGNORE;

      if (debug_linux_nat_async)
	fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");

      restore_child_signals_mask (&prev_mask);
      return minus_one_ptid;
    }

  /* First check if there is a LWP with a wait status pending.  */
  if (pid == -1)
    {
      /* Any LWP that's been resumed will do.  */
      lp = iterate_over_lwps (ptid, status_callback, NULL);
      if (lp)
	{
	  if (debug_linux_nat && lp->status)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: Using pending wait status %s for %s.\n",
				status_to_str (lp->status),
				target_pid_to_str (lp->ptid));
	}

      /* But if we don't find one, we'll have to wait, and check both
	 cloned and uncloned processes.  We start with the cloned
	 processes.  */
      options = __WCLONE | WNOHANG;
    }
  else if (is_lwp (ptid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Waiting for specific LWP %s.\n",
			    target_pid_to_str (ptid));

      /* We have a specific LWP to check.  */
      lp = find_lwp_pid (ptid);
      gdb_assert (lp);

      if (debug_linux_nat && lp->status)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Using pending wait status %s for %s.\n",
			    status_to_str (lp->status),
			    target_pid_to_str (lp->ptid));

      /* If we have to wait, take into account whether PID is a cloned
         process or not.  And we have to convert it to something that
         the layer beneath us can understand.  */
      options = lp->cloned ? __WCLONE : 0;
      pid = GET_LWP (ptid);

      /* We check for lp->waitstatus in addition to lp->status,
	 because we can have pending process exits recorded in
	 lp->status and W_EXITCODE(0,0) == 0.  We should probably have
	 an additional lp->status_p flag.  */
      if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	lp = NULL;
    }

  if (lp && lp->signalled)
    {
      /* A pending SIGSTOP may interfere with the normal stream of
         events.  In a typical case where interference is a problem,
         we have a SIGSTOP signal pending for LWP A while
         single-stepping it, encounter an event in LWP B, and take the
         pending SIGSTOP while trying to stop LWP A.  After processing
         the event in LWP B, LWP A is continued, and we'll never see
         the SIGTRAP associated with the last time we were
         single-stepping LWP A.  */

      /* Resume the thread.  It should halt immediately returning the
         pending SIGSTOP.  */
      registers_changed ();
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
			    lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Catch the pending SIGSTOP.  */
      status = lp->status;
      lp->status = 0;

      stop_wait_callback (lp, NULL);

      /* If the lp->status field isn't empty, we caught another signal
	 while flushing the SIGSTOP.  Return it back to the event
	 queue of the LWP, as we already have an event to handle.  */
      if (lp->status)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: kill %s, %s\n",
				target_pid_to_str (lp->ptid),
				status_to_str (lp->status));
	  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
	}

      lp->status = status;
    }

  if (!target_can_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
    }

  /* Translate generic target_wait options into waitpid options.  */
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  while (lp == NULL)
    {
      pid_t lwpid;

      lwpid = my_waitpid (pid, &status, options);

      if (lwpid > 0)
	{
	  gdb_assert (pid == -1 || lwpid == pid);

	  if (debug_linux_nat)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "LLW: waitpid %ld received %s\n",
				  (long) lwpid, status_to_str (status));
	    }

	  lp = linux_nat_filter_event (lwpid, status, options);

	  /* STATUS is now no longer valid, use LP->STATUS instead.  */
	  status = 0;

	  /* If we were asked to wait for a specific tgid and got an
	     event for some other process, record it as pending for
	     that LWP and keep waiting.  */
	  if (lp
	      && ptid_is_pid (ptid)
	      && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
	    {
	      gdb_assert (lp->resumed);

	      if (debug_linux_nat)
		fprintf (stderr,
			 "LWP %ld got an event %06x, leaving pending.\n",
			 ptid_get_lwp (lp->ptid), lp->status);

	      if (WIFSTOPPED (lp->status))
		{
		  if (WSTOPSIG (lp->status) != SIGSTOP)
		    {
		      /* Cancel breakpoint hits.  The breakpoint may
			 be removed before we fetch events from this
			 process to report to the core.  It is best
			 not to assume the moribund breakpoints
			 heuristic always handles these cases --- it
			 could be too many events go through to the
			 core before this one is handled.  All-stop
			 always cancels breakpoint hits in all
			 threads.  */
		      if (non_stop
			  && linux_nat_lp_status_is_event (lp)
			  && cancel_breakpoint (lp))
			{
			  /* Throw away the SIGTRAP.  */
			  lp->status = 0;

			  if (debug_linux_nat)
			    fprintf (stderr,
				     "LLW: LWP %ld hit a breakpoint while"
				     " waiting for another process;"
				     " cancelled it\n",
				     ptid_get_lwp (lp->ptid));
			}
		      lp->stopped = 1;
		    }
		  else
		    {
		      lp->stopped = 1;
		      lp->signalled = 0;
		    }
		}
	      else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
		{
		  if (debug_linux_nat)
		    fprintf (stderr,
			     "Process %ld exited while stopping LWPs\n",
			     ptid_get_lwp (lp->ptid));

		  /* This was the last lwp in the process.  Since
		     events are serialized to GDB core, and we can't
		     report this one right now, but GDB core and the
		     other target layers will want to be notified
		     about the exit code/signal, leave the status
		     pending for the next time we're able to report
		     it.  */

		  /* Prevent trying to stop this thread again.  We'll
		     never try to resume it because it has a pending
		     status.  */
		  lp->stopped = 1;

		  /* Dead LWPs aren't expected to report a pending
		     SIGSTOP.  */
		  lp->signalled = 0;

		  /* Store the pending event in the waitstatus as
		     well, because W_EXITCODE(0,0) == 0.  */
		  store_waitstatus (&lp->waitstatus, lp->status);
		}

	      /* Keep looking.  */
	      lp = NULL;
	      continue;
	    }

	  if (lp)
	    break;
	  else
	    {
	      if (pid == -1)
		{
		  /* waitpid did return something.  Restart over.  */
		  options |= __WCLONE;
		}
	      continue;
	    }
	}

      if (pid == -1)
	{
	  /* Alternate between checking cloned and uncloned processes.  */
	  options ^= __WCLONE;

	  /* And every time we have checked both:
	     In async mode, return to event loop;
	     In sync mode, suspend waiting for a SIGCHLD signal.  */
	  if (options & __WCLONE)
	    {
	      if (target_options & TARGET_WNOHANG)
		{
		  /* No interesting event.  */
		  ourstatus->kind = TARGET_WAITKIND_IGNORE;

		  if (debug_linux_nat_async)
		    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

		  restore_child_signals_mask (&prev_mask);
		  return minus_one_ptid;
		}

	      sigsuspend (&suspend_mask);
	    }
	}
      else if (target_options & TARGET_WNOHANG)
	{
	  /* No interesting event for PID yet.  */
	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat_async)
	    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);
    }

  if (!target_can_async_p ())
    clear_sigint_trap ();

  gdb_assert (lp);

  status = lp->status;
  lp->status = 0;

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */

  if (WIFSTOPPED (status))
    {
      enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
      struct inferior *inf;

      inf = find_inferior_pid (ptid_get_pid (lp->ptid));
      gdb_assert (inf);

      /* Defer to common code if we get a signal while
	 single-stepping, since that may need special care, e.g. to
	 skip the signal handler, or, if we're gaining control of the
	 inferior.  */
      if (!lp->step
	  && inf->control.stop_soon == NO_STOP_QUIETLY
	  && signal_stop_state (signo) == 0
	  && signal_print_state (signo) == 0
	  && signal_pass_state (signo) == 1)
	{
	  /* FIXME: kettenis/2001-06-06: Should we resume all threads
	     here?  It is not clear we should.  GDB may not expect
	     other threads to run.  On the other hand, not resuming
	     newly attached threads may cause an unwanted delay in
	     getting them running.  */
	  registers_changed ();
	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
				lp->step, signo);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, %s (preempt 'handle')\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid),
				(signo != TARGET_SIGNAL_0
				 ? strsignal (target_signal_to_host (signo))
				 : "0"));
	  lp->stopped = 0;
	  goto retry;
	}

      if (!non_stop)
	{
	  /* Only do the below in all-stop, as we currently use SIGINT
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
					      set_ignore_sigint, NULL);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}
    }

  /* This LWP is stopped now.  */
  lp->stopped = 1;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
			status_to_str (status), target_pid_to_str (lp->ptid));

  if (!non_stop)
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback, NULL);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (pid == -1)
	select_event_lwp (ptid, &lp, &status);

      /* Now that we've selected our final event LWP, cancel any
	 breakpoints in other LWPs that have hit a GDB breakpoint.
	 See the comment in cancel_breakpoints_callback to find out
	 why.  */
      iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);

      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
    }
  else
    lp->resumed = 0;

  if (linux_nat_status_is_event (status))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: trap ptid is %s.\n",
			    target_pid_to_str (lp->ptid));
    }

  /* Extended events stored in lp->waitstatus take precedence over a
     raw lp->status (see status_callback for why both exist).  */
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  restore_child_signals_mask (&prev_mask);

  if (ourstatus->kind == TARGET_WAITKIND_EXITED
      || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_nat_core_of_thread_1 (lp->ptid);

  return lp->ptid;
}
3733 
3734 /* Resume LWPs that are currently stopped without any pending status
3735    to report, but are resumed from the core's perspective.  */
3736 
static int
resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
{
  /* DATA is the ptid passed to the pending target_wait call; LWPs
     matching it are left alone so that wait can report them.  */
  ptid_t *wait_ptid_p = data;

  /* Only act on LWPs that are stopped at the ptrace level, that the
     core believes are running, and that have no pending event to
     report.  */
  if (lp->stopped
      && lp->resumed
      && lp->status == 0
      && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
    {
      gdb_assert (is_executing (lp->ptid));

      /* Don't bother if there's a breakpoint at PC that we'd hit
	 immediately, and we're not waiting for this LWP.  */
      if (!ptid_match (lp->ptid, *wait_ptid_p))
	{
	  struct regcache *regcache = get_thread_regcache (lp->ptid);
	  CORE_ADDR pc = regcache_read_pc (regcache);

	  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
	    return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RSRL: resuming stopped-resumed LWP %s\n",
			    target_pid_to_str (lp->ptid));

      /* Resume at the ptrace level, and clear per-stop state that is
	 only valid while the LWP is stopped.  */
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      lp->stopped = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
      lp->stopped_by_watchpoint = 0;
    }

  /* Always return 0 so iterate_over_lwps visits every LWP.  */
  return 0;
}
3774 
/* Target to_wait implementation.  Wraps linux_nat_wait_1, taking care
   of the async event queue bookkeeping around the actual wait.  */

static ptid_t
linux_nat_wait (struct target_ops *ops,
		ptid_t ptid, struct target_waitstatus *ourstatus,
		int target_options)
{
  ptid_t event_ptid;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"linux_nat_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_can_async_p ())
    async_file_flush ();

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  LWPs get
     in this state if we find them stopping at a time we're not
     interested in reporting the event (target_wait on a
     specific_process, for example, see linux_nat_wait_1), and
     meanwhile the event became uninteresting.  Don't bother resuming
     LWPs we're not going to wait for if they'd stop immediately.  */
  if (non_stop)
    iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);

  event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);

  /* If we requested any event, and something came out, assume there
     may be more.  If we requested a specific lwp or process, also
     assume there may be more.  */
  if (target_can_async_p ()
      && (ourstatus->kind != TARGET_WAITKIND_IGNORE
	  || !ptid_equal (ptid, minus_one_ptid)))
    async_file_mark ();

  /* Get ready for the next event.  */
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return event_ptid;
}
3816 
3817 static int
3818 kill_callback (struct lwp_info *lp, void *data)
3819 {
3820   errno = 0;
3821   ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3822   if (debug_linux_nat)
3823     fprintf_unfiltered (gdb_stdlog,
3824 			"KC:  PTRACE_KILL %s, 0, 0 (%s)\n",
3825 			target_pid_to_str (lp->ptid),
3826 			errno ? safe_strerror (errno) : "OK");
3827 
3828   return 0;
3829 }
3830 
3831 static int
3832 kill_wait_callback (struct lwp_info *lp, void *data)
3833 {
3834   pid_t pid;
3835 
3836   /* We must make sure that there are no pending events (delayed
3837      SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3838      program doesn't interfere with any following debugging session.  */
3839 
3840   /* For cloned processes we must check both with __WCLONE and
3841      without, since the exit status of a cloned process isn't reported
3842      with __WCLONE.  */
3843   if (lp->cloned)
3844     {
3845       do
3846 	{
3847 	  pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
3848 	  if (pid != (pid_t) -1)
3849 	    {
3850 	      if (debug_linux_nat)
3851 		fprintf_unfiltered (gdb_stdlog,
3852 				    "KWC: wait %s received unknown.\n",
3853 				    target_pid_to_str (lp->ptid));
3854 	      /* The Linux kernel sometimes fails to kill a thread
3855 		 completely after PTRACE_KILL; that goes from the stop
3856 		 point in do_fork out to the one in
3857 		 get_signal_to_deliever and waits again.  So kill it
3858 		 again.  */
3859 	      kill_callback (lp, NULL);
3860 	    }
3861 	}
3862       while (pid == GET_LWP (lp->ptid));
3863 
3864       gdb_assert (pid == -1 && errno == ECHILD);
3865     }
3866 
3867   do
3868     {
3869       pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
3870       if (pid != (pid_t) -1)
3871 	{
3872 	  if (debug_linux_nat)
3873 	    fprintf_unfiltered (gdb_stdlog,
3874 				"KWC: wait %s received unk.\n",
3875 				target_pid_to_str (lp->ptid));
3876 	  /* See the call to kill_callback above.  */
3877 	  kill_callback (lp, NULL);
3878 	}
3879     }
3880   while (pid == GET_LWP (lp->ptid));
3881 
3882   gdb_assert (pid == -1 && errno == ECHILD);
3883   return 0;
3884 }
3885 
/* Target to_kill implementation: kill the inferior and every one of
   its LWPs, then mourn it.  */

static void
linux_nat_kill (struct target_ops *ops)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
      wait (&status);
    }

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));

      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (ptid, stop_callback, NULL);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (ptid, stop_wait_callback, NULL);

      /* Kill all LWP's ...  */
      iterate_over_lwps (ptid, kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (ptid, kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
3928 
3929 static void
3930 linux_nat_mourn_inferior (struct target_ops *ops)
3931 {
3932   purge_lwp_list (ptid_get_pid (inferior_ptid));
3933 
3934   if (! forks_exist_p ())
3935     /* Normal case, no other forks available.  */
3936     linux_ops->to_mourn_inferior (ops);
3937   else
3938     /* Multi-fork case.  The current inferior_ptid has exited, but
3939        there are other viable forks to debug.  Delete the exiting
3940        one and context-switch to the first available.  */
3941     linux_fork_mourn_inferior ();
3942 }
3943 
3944 /* Convert a native/host siginfo object, into/from the siginfo in the
3945    layout of the inferiors' architecture.  */
3946 
3947 static void
3948 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3949 {
3950   int done = 0;
3951 
3952   if (linux_nat_siginfo_fixup != NULL)
3953     done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3954 
3955   /* If there was no callback, or the callback didn't do anything,
3956      then just do a straight memcpy.  */
3957   if (!done)
3958     {
3959       if (direction == 1)
3960 	memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3961       else
3962 	memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3963     }
3964 }
3965 
/* Read or write the inferior's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO, honoring OFFSET/LEN into the converted
   (inferior-layout) object.  Returns the number of bytes transferred,
   or -1 on error.  */

static LONGEST
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  int pid;
  struct siginfo siginfo;
  gdb_byte inf_siginfo[sizeof (struct siginfo)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Prefer the LWP id; fall back to the overall pid when
     inferior_ptid carries no LWP component.  */
  pid = GET_LWP (inferior_ptid);
  if (pid == 0)
    pid = GET_PID (inferior_ptid);

  if (offset > sizeof (siginfo))
    return -1;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return -1;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return -1;
    }

  return len;
}
4018 
/* Target to_xfer_partial implementation.  Handles siginfo transfers
   locally and delegates everything else to the inherited layer with
   inferior_ptid rewritten to the plain LWP id.  */

static LONGEST
linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
			const char *annex, gdb_byte *readbuf,
			const gdb_byte *writebuf,
			ULONGEST offset, LONGEST len)
{
  struct cleanup *old_chain;
  LONGEST xfer;

  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
			       offset, len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
    return 0;

  old_chain = save_inferior_ptid ();

  /* The lower layers work in terms of plain pids, so temporarily
     replace inferior_ptid with the LWP id; the cleanup restores it.  */
  if (is_lwp (inferior_ptid))
    inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));

  xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
				     offset, len);

  do_cleanups (old_chain);
  return xfer;
}
4049 
4050 static int
4051 linux_thread_alive (ptid_t ptid)
4052 {
4053   int err, tmp_errno;
4054 
4055   gdb_assert (is_lwp (ptid));
4056 
4057   /* Send signal 0 instead of anything ptrace, because ptracing a
4058      running thread errors out claiming that the thread doesn't
4059      exist.  */
4060   err = kill_lwp (GET_LWP (ptid), 0);
4061   tmp_errno = errno;
4062   if (debug_linux_nat)
4063     fprintf_unfiltered (gdb_stdlog,
4064 			"LLTA: KILL(SIG0) %s (%s)\n",
4065 			target_pid_to_str (ptid),
4066 			err ? safe_strerror (tmp_errno) : "OK");
4067 
4068   if (err != 0)
4069     return 0;
4070 
4071   return 1;
4072 }
4073 
/* Target to_thread_alive implementation; thin wrapper around
   linux_thread_alive.  */

static int
linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  return linux_thread_alive (ptid);
}
4079 
4080 static char *
4081 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4082 {
4083   static char buf[64];
4084 
4085   if (is_lwp (ptid)
4086       && (GET_PID (ptid) != GET_LWP (ptid)
4087 	  || num_lwps (GET_PID (ptid)) > 1))
4088     {
4089       snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4090       return buf;
4091     }
4092 
4093   return normal_pid_to_str (ptid);
4094 }
4095 
/* Target to_thread_name implementation.  Returns the name of thread
   THR as read from /proc/PID/task/LWP/comm, or NULL if it cannot be
   read.  The result points into a static buffer and is only valid
   until the next call.  */

static char *
linux_nat_thread_name (struct thread_info *thr)
{
  int pid = ptid_get_pid (thr->ptid);
  long lwp = ptid_get_lwp (thr->ptid);
#define FORMAT "/proc/%d/task/%ld/comm"
  char buf[sizeof (FORMAT) + 30];
  FILE *comm_file;
  char *result = NULL;

  snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
  comm_file = fopen (buf, "r");
  if (comm_file)
    {
      /* Not exported by the kernel, so we define it here.  */
#define COMM_LEN 16
      /* Static so the caller can keep using the result after we
	 return.  */
      static char line[COMM_LEN + 1];

      if (fgets (line, sizeof (line), comm_file))
	{
	  /* Strip the trailing newline the kernel appends.  */
	  char *nl = strchr (line, '\n');

	  if (nl)
	    *nl = '\0';
	  if (*line != '\0')
	    result = line;
	}

      fclose (comm_file);
    }

#undef COMM_LEN
#undef FORMAT

  return result;
}
4132 
4133 /* Accepts an integer PID; Returns a string representing a file that
4134    can be opened to get the symbols for the child process.  */
4135 
4136 static char *
4137 linux_child_pid_to_exec_file (int pid)
4138 {
4139   char *name1, *name2;
4140 
4141   name1 = xmalloc (MAXPATHLEN);
4142   name2 = xmalloc (MAXPATHLEN);
4143   make_cleanup (xfree, name1);
4144   make_cleanup (xfree, name2);
4145   memset (name2, 0, MAXPATHLEN);
4146 
4147   sprintf (name1, "/proc/%d/exe", pid);
4148   if (readlink (name1, name2, MAXPATHLEN) > 0)
4149     return name2;
4150   else
4151     return name1;
4152 }
4153 
4154 /* Service function for corefiles and info proc.  */
4155 
4156 static int
4157 read_mapping (FILE *mapfile,
4158 	      long long *addr,
4159 	      long long *endaddr,
4160 	      char *permissions,
4161 	      long long *offset,
4162 	      char *device, long long *inode, char *filename)
4163 {
4164   int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4165 		    addr, endaddr, permissions, offset, device, inode);
4166 
4167   filename[0] = '\0';
4168   if (ret > 0 && ret != EOF)
4169     {
4170       /* Eat everything up to EOL for the filename.  This will prevent
4171          weird filenames (such as one with embedded whitespace) from
4172          confusing this code.  It also makes this code more robust in
4173          respect to annotations the kernel may add after the filename.
4174 
4175          Note the filename is used for informational purposes
4176          only.  */
4177       ret += fscanf (mapfile, "%[^\n]\n", filename);
4178     }
4179 
4180   return (ret != 0 && ret != EOF);
4181 }
4182 
4183 /* Fills the "to_find_memory_regions" target vector.  Lists the memory
4184    regions in the inferior for a corefile.  */
4185 
static int
linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
{
  int pid = PIDGET (inferior_ptid);
  char mapsfilename[MAXPATHLEN];
  FILE *mapsfile;
  long long addr, endaddr, size, offset, inode;
  char permissions[8], device[8], filename[MAXPATHLEN];
  int read, write, exec;
  struct cleanup *cleanup;

  /* Compose the filename for the /proc memory map, and open it.  */
  sprintf (mapsfilename, "/proc/%d/maps", pid);
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
    error (_("Could not open %s."), mapsfilename);
  cleanup = make_cleanup_fclose (mapsfile);

  if (info_verbose)
    fprintf_filtered (gdb_stdout,
		      "Reading memory regions from %s\n", mapsfilename);

  /* Now iterate until end-of-file.  */
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
		       &offset, &device[0], &inode, &filename[0]))
    {
      size = endaddr - addr;

      /* Get the segment's permissions from the 'rwx' flags in the
	 permission string.  */
      read = (strchr (permissions, 'r') != 0);
      write = (strchr (permissions, 'w') != 0);
      exec = (strchr (permissions, 'x') != 0);

      if (info_verbose)
	{
	  fprintf_filtered (gdb_stdout,
			    "Save segment, %s bytes at %s (%c%c%c)",
			    plongest (size), paddress (target_gdbarch, addr),
			    read ? 'r' : ' ',
			    write ? 'w' : ' ', exec ? 'x' : ' ');
	  if (filename[0])
	    fprintf_filtered (gdb_stdout, " for %s", filename);
	  fprintf_filtered (gdb_stdout, "\n");
	}

      /* Invoke the callback function to create the corefile
	 segment.  */
      func (addr, size, read, write, exec, obfd);
    }
  do_cleanups (cleanup);
  return 0;
}
4237 
4238 static int
4239 find_signalled_thread (struct thread_info *info, void *data)
4240 {
4241   if (info->suspend.stop_signal != TARGET_SIGNAL_0
4242       && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4243     return 1;
4244 
4245   return 0;
4246 }
4247 
4248 static enum target_signal
4249 find_stop_signal (void)
4250 {
4251   struct thread_info *info =
4252     iterate_over_threads (find_signalled_thread, NULL);
4253 
4254   if (info)
4255     return info->suspend.stop_signal;
4256   else
4257     return TARGET_SIGNAL_0;
4258 }
4259 
4260 /* Records the thread's register state for the corefile note
4261    section.  */
4262 
static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
			       char *note_data, int *note_size,
			       enum target_signal stop_signal)
{
  unsigned long lwp = ptid_get_lwp (ptid);
  struct gdbarch *gdbarch = target_gdbarch;
  struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
  const struct regset *regset;
  int core_regset_p;
  struct cleanup *old_chain;
  struct core_regset_section *sect_list;
  char *gdb_regset;

  /* Fetch all of the thread's registers with inferior_ptid
     temporarily pointed at it.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = ptid;
  target_fetch_registers (regcache, -1);
  do_cleanups (old_chain);

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
  sect_list = gdbarch_core_regset_sections (gdbarch);

  /* The loop below uses the new struct core_regset_section, which stores
     the supported section names and sizes for the core file.  Note that
     note PRSTATUS needs to be treated specially.  But the other notes are
     structurally the same, so they can benefit from the new struct.  */
  if (core_regset_p && sect_list != NULL)
    while (sect_list->sect_name != NULL)
      {
	regset = gdbarch_regset_from_core_section (gdbarch,
						   sect_list->sect_name,
						   sect_list->size);
	gdb_assert (regset && regset->collect_regset);
	gdb_regset = xmalloc (sect_list->size);
	regset->collect_regset (regset, regcache, -1,
				gdb_regset, sect_list->size);

	/* ".reg" becomes the PRSTATUS note (which also carries the
	   pid and stop signal); everything else is a plain register
	   note named after its section.  */
	if (strcmp (sect_list->sect_name, ".reg") == 0)
	  note_data = (char *) elfcore_write_prstatus
				(obfd, note_data, note_size,
				 lwp, target_signal_to_host (stop_signal),
				 gdb_regset);
	else
	  note_data = (char *) elfcore_write_register_note
				(obfd, note_data, note_size,
				 sect_list->sect_name, gdb_regset,
				 sect_list->size);
	xfree (gdb_regset);
	sect_list++;
      }

  /* For architectures that does not have the struct core_regset_section
     implemented, we use the old method.  When all the architectures have
     the new support, the code below should be deleted.  */
  else
    {
      gdb_gregset_t gregs;
      gdb_fpregset_t fpregs;

      /* Collect the general registers, preferring the arch regset
	 collector and falling back to fill_gregset.  */
      if (core_regset_p
	  && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
							 sizeof (gregs)))
	  != NULL && regset->collect_regset != NULL)
	regset->collect_regset (regset, regcache, -1,
				&gregs, sizeof (gregs));
      else
	fill_gregset (regcache, &gregs, -1);

      note_data = (char *) elfcore_write_prstatus
	(obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
	 &gregs);

      /* Likewise for the floating-point registers.  */
      if (core_regset_p
          && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
							 sizeof (fpregs)))
	  != NULL && regset->collect_regset != NULL)
	regset->collect_regset (regset, regcache, -1,
				&fpregs, sizeof (fpregs));
      else
	fill_fpregset (regcache, &fpregs, -1);

      note_data = (char *) elfcore_write_prfpreg (obfd,
						  note_data,
						  note_size,
						  &fpregs, sizeof (fpregs));
    }

  return note_data;
}
4352 
/* State passed to linux_nat_corefile_thread_callback while collecting
   per-thread register notes for a corefile.  */

struct linux_nat_corefile_thread_data
{
  bfd *obfd;			/* Core file being written.  */
  char *note_data;		/* Accumulated note section contents.  */
  int *note_size;		/* Running size of NOTE_DATA.  */
  int num_notes;		/* Number of thread notes written so far.  */
  enum target_signal stop_signal;	/* Signal recorded in PRSTATUS.  */
};
4361 
4362 /* Called by gdbthread.c once per thread.  Records the thread's
4363    register state for the corefile note section.  */
4364 
4365 static int
4366 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4367 {
4368   struct linux_nat_corefile_thread_data *args = data;
4369 
4370   args->note_data = linux_nat_do_thread_registers (args->obfd,
4371 						   ti->ptid,
4372 						   args->note_data,
4373 						   args->note_size,
4374 						   args->stop_signal);
4375   args->num_notes++;
4376 
4377   return 0;
4378 }
4379 
4380 /* Enumerate spufs IDs for process PID.  */
4381 
static void
iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
{
  char path[128];
  DIR *dir;
  struct dirent *entry;

  /* Scan the process's open file descriptors via /proc.  */
  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip entries whose name is not a (nonzero) fd number, such
	 as "." and "..".  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      /* An SPU context fd appears as a directory...  */
      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      /* ... residing on a spufs mount.  */
      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Report the context; its fd number serves as its id.  */
      callback (data, fd);
    }

  closedir (dir);
}
4421 
4422 /* Generate corefile notes for SPU contexts.  */
4423 
/* State passed to linux_spu_corefile_callback while collecting SPU
   context notes for a corefile.  */

struct linux_spu_corefile_data
{
  bfd *obfd;			/* Core file being written.  */
  char *note_data;		/* Accumulated note section contents.  */
  int *note_size;		/* Running size of NOTE_DATA.  */
};
4430 
/* Callback for iterate_over_spus: dump the interesting spufs files of
   SPU context FD as "SPU/<fd>/<file>" notes into DATA (a struct
   linux_spu_corefile_data).  */

static void
linux_spu_corefile_callback (void *data, int fd)
{
  struct linux_spu_corefile_data *args = data;
  int i;

  /* The spufs files worth preserving in a core file.  */
  static const char *spu_files[] =
    {
      "object-id",
      "mem",
      "regs",
      "fpcr",
      "lslr",
      "decr",
      "decr_status",
      "signal1",
      "signal1_type",
      "signal2",
      "signal2_type",
      "event_mask",
      "event_status",
      "mbox_info",
      "ibox_info",
      "wbox_info",
      "dma_info",
      "proxydma_info",
   };

  for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
    {
      char annex[32], note_name[32];
      gdb_byte *spu_data;
      LONGEST spu_len;

      /* Read the file's contents through the SPU target object;
	 files that cannot be read are silently skipped.  */
      xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
      spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
				   annex, &spu_data);
      if (spu_len > 0)
	{
	  xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
	  args->note_data = elfcore_write_note (args->obfd, args->note_data,
						args->note_size, note_name,
						NT_SPU, spu_data, spu_len);
	  xfree (spu_data);
	}
    }
}
4478 
4479 static char *
4480 linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4481 {
4482   struct linux_spu_corefile_data args;
4483 
4484   args.obfd = obfd;
4485   args.note_data = note_data;
4486   args.note_size = note_size;
4487 
4488   iterate_over_spus (PIDGET (inferior_ptid),
4489 		     linux_spu_corefile_callback, &args);
4490 
4491   return args.note_data;
4492 }
4493 
4494 /* Fills the "to_make_corefile_note" target vector.  Builds the note
4495    section for a corefile, and returns it in a malloc buffer.  */
4496 
static char *
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
  struct linux_nat_corefile_thread_data thread_args;
  /* The variable size must be >= sizeof (prpsinfo_t.pr_fname).  */
  char fname[16] = { '\0' };
  /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs).  */
  char psargs[80] = { '\0' };
  char *note_data = NULL;
  ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
  gdb_byte *auxv;
  int auxv_len;

  /* Write the PRPSINFO note: executable name plus command line.  */
  if (get_exec_file (0))
    {
      strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
      strncpy (psargs, get_exec_file (0), sizeof (psargs));
      if (get_inferior_args ())
	{
	  char *string_end;
	  char *psargs_end = psargs + sizeof (psargs);

	  /* linux_elfcore_write_prpsinfo () handles zero unterminated
	     strings fine.  */
	  string_end = memchr (psargs, 0, sizeof (psargs));
	  if (string_end != NULL)
	    {
	      *string_end++ = ' ';
	      strncpy (string_end, get_inferior_args (),
		       psargs_end - string_end);
	    }
	}
      note_data = (char *) elfcore_write_prpsinfo (obfd,
						   note_data,
						   note_size, fname, psargs);
    }

  /* Dump information for threads.  */
  thread_args.obfd = obfd;
  thread_args.note_data = note_data;
  thread_args.note_size = note_size;
  thread_args.num_notes = 0;
  thread_args.stop_signal = find_stop_signal ();
  iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
  gdb_assert (thread_args.num_notes != 0);
  note_data = thread_args.note_data;

  /* Append the auxiliary vector, if readable.  */
  auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
				NULL, &auxv);
  if (auxv_len > 0)
    {
      note_data = elfcore_write_note (obfd, note_data, note_size,
				      "CORE", NT_AUXV, auxv, auxv_len);
      xfree (auxv);
    }

  /* Append SPU context notes, if any.  */
  note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);

  make_cleanup (xfree, note_data);
  return note_data;
}
4558 
4559 /* Implement the "info proc" command.  */
4560 
4561 static void
4562 linux_nat_info_proc_cmd (char *args, int from_tty)
4563 {
4564   /* A long is used for pid instead of an int to avoid a loss of precision
4565      compiler warning from the output of strtoul.  */
4566   long pid = PIDGET (inferior_ptid);
4567   FILE *procfile;
4568   char **argv = NULL;
4569   char buffer[MAXPATHLEN];
4570   char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4571   int cmdline_f = 1;
4572   int cwd_f = 1;
4573   int exe_f = 1;
4574   int mappings_f = 0;
4575   int status_f = 0;
4576   int stat_f = 0;
4577   int all = 0;
4578   struct stat dummy;
4579 
4580   if (args)
4581     {
4582       /* Break up 'args' into an argv array.  */
4583       argv = gdb_buildargv (args);
4584       make_cleanup_freeargv (argv);
4585     }
4586   while (argv != NULL && *argv != NULL)
4587     {
4588       if (isdigit (argv[0][0]))
4589 	{
4590 	  pid = strtoul (argv[0], NULL, 10);
4591 	}
4592       else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4593 	{
4594 	  mappings_f = 1;
4595 	}
4596       else if (strcmp (argv[0], "status") == 0)
4597 	{
4598 	  status_f = 1;
4599 	}
4600       else if (strcmp (argv[0], "stat") == 0)
4601 	{
4602 	  stat_f = 1;
4603 	}
4604       else if (strcmp (argv[0], "cmd") == 0)
4605 	{
4606 	  cmdline_f = 1;
4607 	}
4608       else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4609 	{
4610 	  exe_f = 1;
4611 	}
4612       else if (strcmp (argv[0], "cwd") == 0)
4613 	{
4614 	  cwd_f = 1;
4615 	}
4616       else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4617 	{
4618 	  all = 1;
4619 	}
4620       else
4621 	{
4622 	  /* [...] (future options here).  */
4623 	}
4624       argv++;
4625     }
4626   if (pid == 0)
4627     error (_("No current process: you must name one."));
4628 
4629   sprintf (fname1, "/proc/%ld", pid);
4630   if (stat (fname1, &dummy) != 0)
4631     error (_("No /proc directory: '%s'"), fname1);
4632 
4633   printf_filtered (_("process %ld\n"), pid);
4634   if (cmdline_f || all)
4635     {
4636       sprintf (fname1, "/proc/%ld/cmdline", pid);
4637       if ((procfile = fopen (fname1, "r")) != NULL)
4638 	{
4639 	  struct cleanup *cleanup = make_cleanup_fclose (procfile);
4640 
4641           if (fgets (buffer, sizeof (buffer), procfile))
4642             printf_filtered ("cmdline = '%s'\n", buffer);
4643           else
4644             warning (_("unable to read '%s'"), fname1);
4645 	  do_cleanups (cleanup);
4646 	}
4647       else
4648 	warning (_("unable to open /proc file '%s'"), fname1);
4649     }
4650   if (cwd_f || all)
4651     {
4652       sprintf (fname1, "/proc/%ld/cwd", pid);
4653       memset (fname2, 0, sizeof (fname2));
4654       if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4655 	printf_filtered ("cwd = '%s'\n", fname2);
4656       else
4657 	warning (_("unable to read link '%s'"), fname1);
4658     }
4659   if (exe_f || all)
4660     {
4661       sprintf (fname1, "/proc/%ld/exe", pid);
4662       memset (fname2, 0, sizeof (fname2));
4663       if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4664 	printf_filtered ("exe = '%s'\n", fname2);
4665       else
4666 	warning (_("unable to read link '%s'"), fname1);
4667     }
4668   if (mappings_f || all)
4669     {
4670       sprintf (fname1, "/proc/%ld/maps", pid);
4671       if ((procfile = fopen (fname1, "r")) != NULL)
4672 	{
4673 	  long long addr, endaddr, size, offset, inode;
4674 	  char permissions[8], device[8], filename[MAXPATHLEN];
4675 	  struct cleanup *cleanup;
4676 
4677 	  cleanup = make_cleanup_fclose (procfile);
4678 	  printf_filtered (_("Mapped address spaces:\n\n"));
4679 	  if (gdbarch_addr_bit (target_gdbarch) == 32)
4680 	    {
4681 	      printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4682 			   "Start Addr",
4683 			   "  End Addr",
4684 			   "      Size", "    Offset", "objfile");
4685             }
4686 	  else
4687             {
4688 	      printf_filtered ("  %18s %18s %10s %10s %7s\n",
4689 			   "Start Addr",
4690 			   "  End Addr",
4691 			   "      Size", "    Offset", "objfile");
4692 	    }
4693 
4694 	  while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4695 			       &offset, &device[0], &inode, &filename[0]))
4696 	    {
4697 	      size = endaddr - addr;
4698 
4699 	      /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4700 		 calls here (and possibly above) should be abstracted
4701 		 out into their own functions?  Andrew suggests using
4702 		 a generic local_address_string instead to print out
4703 		 the addresses; that makes sense to me, too.  */
4704 
4705 	      if (gdbarch_addr_bit (target_gdbarch) == 32)
4706 	        {
4707 	          printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4708 			       (unsigned long) addr,	/* FIXME: pr_addr */
4709 			       (unsigned long) endaddr,
4710 			       (int) size,
4711 			       (unsigned int) offset,
4712 			       filename[0] ? filename : "");
4713 		}
4714 	      else
4715 	        {
4716 	          printf_filtered ("  %#18lx %#18lx %#10x %#10x %7s\n",
4717 			       (unsigned long) addr,	/* FIXME: pr_addr */
4718 			       (unsigned long) endaddr,
4719 			       (int) size,
4720 			       (unsigned int) offset,
4721 			       filename[0] ? filename : "");
4722 	        }
4723 	    }
4724 
4725 	  do_cleanups (cleanup);
4726 	}
4727       else
4728 	warning (_("unable to open /proc file '%s'"), fname1);
4729     }
4730   if (status_f || all)
4731     {
4732       sprintf (fname1, "/proc/%ld/status", pid);
4733       if ((procfile = fopen (fname1, "r")) != NULL)
4734 	{
4735 	  struct cleanup *cleanup = make_cleanup_fclose (procfile);
4736 
4737 	  while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4738 	    puts_filtered (buffer);
4739 	  do_cleanups (cleanup);
4740 	}
4741       else
4742 	warning (_("unable to open /proc file '%s'"), fname1);
4743     }
4744   if (stat_f || all)
4745     {
4746       sprintf (fname1, "/proc/%ld/stat", pid);
4747       if ((procfile = fopen (fname1, "r")) != NULL)
4748 	{
4749 	  int itmp;
4750 	  char ctmp;
4751 	  long ltmp;
4752 	  struct cleanup *cleanup = make_cleanup_fclose (procfile);
4753 
4754 	  if (fscanf (procfile, "%d ", &itmp) > 0)
4755 	    printf_filtered (_("Process: %d\n"), itmp);
4756 	  if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
4757 	    printf_filtered (_("Exec file: %s\n"), buffer);
4758 	  if (fscanf (procfile, "%c ", &ctmp) > 0)
4759 	    printf_filtered (_("State: %c\n"), ctmp);
4760 	  if (fscanf (procfile, "%d ", &itmp) > 0)
4761 	    printf_filtered (_("Parent process: %d\n"), itmp);
4762 	  if (fscanf (procfile, "%d ", &itmp) > 0)
4763 	    printf_filtered (_("Process group: %d\n"), itmp);
4764 	  if (fscanf (procfile, "%d ", &itmp) > 0)
4765 	    printf_filtered (_("Session id: %d\n"), itmp);
4766 	  if (fscanf (procfile, "%d ", &itmp) > 0)
4767 	    printf_filtered (_("TTY: %d\n"), itmp);
4768 	  if (fscanf (procfile, "%d ", &itmp) > 0)
4769 	    printf_filtered (_("TTY owner process group: %d\n"), itmp);
4770 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4771 	    printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4772 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4773 	    printf_filtered (_("Minor faults (no memory page): %lu\n"),
4774 			     (unsigned long) ltmp);
4775 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4776 	    printf_filtered (_("Minor faults, children: %lu\n"),
4777 			     (unsigned long) ltmp);
4778 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4779 	    printf_filtered (_("Major faults (memory page faults): %lu\n"),
4780 			     (unsigned long) ltmp);
4781 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4782 	    printf_filtered (_("Major faults, children: %lu\n"),
4783 			     (unsigned long) ltmp);
4784 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4785 	    printf_filtered (_("utime: %ld\n"), ltmp);
4786 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4787 	    printf_filtered (_("stime: %ld\n"), ltmp);
4788 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4789 	    printf_filtered (_("utime, children: %ld\n"), ltmp);
4790 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4791 	    printf_filtered (_("stime, children: %ld\n"), ltmp);
4792 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4793 	    printf_filtered (_("jiffies remaining in current "
4794 			       "time slice: %ld\n"), ltmp);
4795 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4796 	    printf_filtered (_("'nice' value: %ld\n"), ltmp);
4797 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4798 	    printf_filtered (_("jiffies until next timeout: %lu\n"),
4799 			     (unsigned long) ltmp);
4800 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4801 	    printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4802 			     (unsigned long) ltmp);
4803 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4804 	    printf_filtered (_("start time (jiffies since "
4805 			       "system boot): %ld\n"), ltmp);
4806 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4807 	    printf_filtered (_("Virtual memory size: %lu\n"),
4808 			     (unsigned long) ltmp);
4809 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4810 	    printf_filtered (_("Resident set size: %lu\n"),
4811 			     (unsigned long) ltmp);
4812 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4813 	    printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4814 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4815 	    printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4816 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4817 	    printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4818 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
4819 	    printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
4820 #if 0	/* Don't know how architecture-dependent the rest is...
4821 	   Anyway the signal bitmap info is available from "status".  */
4822 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
4823 	    printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
4824 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
4825 	    printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4826 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4827 	    printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4828 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4829 	    printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4830 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4831 	    printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4832 	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
4833 	    printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
4834 	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
4835 	    printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
4836 #endif
4837 	  do_cleanups (cleanup);
4838 	}
4839       else
4840 	warning (_("unable to open /proc file '%s'"), fname1);
4841     }
4842 }
4843 
4844 /* Implement the to_xfer_partial interface for memory reads using the /proc
4845    filesystem.  Because we can use a single read() call for /proc, this
4846    can be much more efficient than banging away at PTRACE_PEEKTEXT,
4847    but it doesn't support writes.  */
4848 
4849 static LONGEST
4850 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4851 			 const char *annex, gdb_byte *readbuf,
4852 			 const gdb_byte *writebuf,
4853 			 ULONGEST offset, LONGEST len)
4854 {
4855   LONGEST ret;
4856   int fd;
4857   char filename[64];
4858 
4859   if (object != TARGET_OBJECT_MEMORY || !readbuf)
4860     return 0;
4861 
4862   /* Don't bother for one word.  */
4863   if (len < 3 * sizeof (long))
4864     return 0;
4865 
4866   /* We could keep this file open and cache it - possibly one per
4867      thread.  That requires some juggling, but is even faster.  */
4868   sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4869   fd = open (filename, O_RDONLY | O_LARGEFILE);
4870   if (fd == -1)
4871     return 0;
4872 
4873   /* If pread64 is available, use it.  It's faster if the kernel
4874      supports it (only one syscall), and it's 64-bit safe even on
4875      32-bit platforms (for instance, SPARC debugging a SPARC64
4876      application).  */
4877 #ifdef HAVE_PREAD64
4878   if (pread64 (fd, readbuf, len, offset) != len)
4879 #else
4880   if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4881 #endif
4882     ret = 0;
4883   else
4884     ret = len;
4885 
4886   close (fd);
4887   return ret;
4888 }
4889 
4890 
/* Enumerate spufs IDs for process PID.  Scan /proc/PID/fd for
   descriptors that refer to directories on an spufs mount; each such
   descriptor number is an SPU context ID.  The IDs form a conceptual
   array of 4-byte target-byte-order integers; copy the slice
   [OFFSET, OFFSET+LEN) of that array into BUF.  Returns the number of
   bytes stored in BUF, or -1 if /proc/PID/fd cannot be opened.  */
static LONGEST
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
  LONGEST pos = 0;		/* Byte position in the conceptual ID array.  */
  LONGEST written = 0;		/* Bytes actually stored into BUF.  */
  char path[128];
  DIR *dir;
  struct dirent *entry;

  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip "." and ".." and any non-numeric entry; fd 0 (stdin)
	 can never be an SPU context, so atoi returning 0 is fine.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      /* An SPU context shows up as a directory on an spufs
	 filesystem; anything else is not of interest.  */
      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Store the ID only if the whole 4-byte entry fits inside the
	 requested [OFFSET, OFFSET+LEN) window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
4940 
4941 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4942    object type, using the /proc file system.  */
4943 static LONGEST
4944 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4945 		     const char *annex, gdb_byte *readbuf,
4946 		     const gdb_byte *writebuf,
4947 		     ULONGEST offset, LONGEST len)
4948 {
4949   char buf[128];
4950   int fd = 0;
4951   int ret = -1;
4952   int pid = PIDGET (inferior_ptid);
4953 
4954   if (!annex)
4955     {
4956       if (!readbuf)
4957 	return -1;
4958       else
4959 	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4960     }
4961 
4962   xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4963   fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4964   if (fd <= 0)
4965     return -1;
4966 
4967   if (offset != 0
4968       && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4969     {
4970       close (fd);
4971       return 0;
4972     }
4973 
4974   if (writebuf)
4975     ret = write (fd, writebuf, (size_t) len);
4976   else if (readbuf)
4977     ret = read (fd, readbuf, (size_t) len);
4978 
4979   close (fd);
4980   return ret;
4981 }
4982 
4983 
4984 /* Parse LINE as a signal set and add its set bits to SIGS.  */
4985 
4986 static void
4987 add_line_to_sigset (const char *line, sigset_t *sigs)
4988 {
4989   int len = strlen (line) - 1;
4990   const char *p;
4991   int signum;
4992 
4993   if (line[len] != '\n')
4994     error (_("Could not parse signal set: %s"), line);
4995 
4996   p = line;
4997   signum = len * 4;
4998   while (len-- > 0)
4999     {
5000       int digit;
5001 
5002       if (*p >= '0' && *p <= '9')
5003 	digit = *p - '0';
5004       else if (*p >= 'a' && *p <= 'f')
5005 	digit = *p - 'a' + 10;
5006       else
5007 	error (_("Could not parse signal set: %s"), line);
5008 
5009       signum -= 4;
5010 
5011       if (digit & 1)
5012 	sigaddset (sigs, signum + 1);
5013       if (digit & 2)
5014 	sigaddset (sigs, signum + 2);
5015       if (digit & 4)
5016 	sigaddset (sigs, signum + 3);
5017       if (digit & 8)
5018 	sigaddset (sigs, signum + 4);
5019 
5020       p++;
5021     }
5022 }
5023 
5024 /* Find process PID's pending signals from /proc/pid/status and set
5025    SIGS to match.  */
5026 
5027 void
5028 linux_proc_pending_signals (int pid, sigset_t *pending,
5029 			    sigset_t *blocked, sigset_t *ignored)
5030 {
5031   FILE *procfile;
5032   char buffer[MAXPATHLEN], fname[MAXPATHLEN];
5033   struct cleanup *cleanup;
5034 
5035   sigemptyset (pending);
5036   sigemptyset (blocked);
5037   sigemptyset (ignored);
5038   sprintf (fname, "/proc/%d/status", pid);
5039   procfile = fopen (fname, "r");
5040   if (procfile == NULL)
5041     error (_("Could not open %s"), fname);
5042   cleanup = make_cleanup_fclose (procfile);
5043 
5044   while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5045     {
5046       /* Normal queued signals are on the SigPnd line in the status
5047 	 file.  However, 2.6 kernels also have a "shared" pending
5048 	 queue for delivering signals to a thread group, so check for
5049 	 a ShdPnd line also.
5050 
5051 	 Unfortunately some Red Hat kernels include the shared pending
5052 	 queue but not the ShdPnd status field.  */
5053 
5054       if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5055 	add_line_to_sigset (buffer + 8, pending);
5056       else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5057 	add_line_to_sigset (buffer + 8, pending);
5058       else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5059 	add_line_to_sigset (buffer + 8, blocked);
5060       else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5061 	add_line_to_sigset (buffer + 8, ignored);
5062     }
5063 
5064   do_cleanups (cleanup);
5065 }
5066 
/* to_xfer_partial backend for TARGET_OBJECT_OSDATA.  With a NULL
   ANNEX, produce the XML list of available osdata types; with ANNEX
   "processes", produce an XML listing of all processes in /proc.  The
   listing is snapshotted into a static obstack when a read starts at
   OFFSET 0 and served incrementally from there until a read past the
   end frees it.  Returns the number of bytes copied into READBUF, or
   0 when done or for an unknown annex.  */
static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
		       const char *annex, gdb_byte *readbuf,
		       const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  /* We make the process list snapshot when the object starts to be
     read.  BUF points into OBSTACK; LEN_AVAIL is the snapshot length,
     with -1 meaning "obstack never initialized".  */
  static const char *buf;
  static LONGEST len_avail = -1;
  static struct obstack obstack;

  DIR *dirp;

  gdb_assert (object == TARGET_OBJECT_OSDATA);

  if (!annex)
    {
      if (offset == 0)
	{
	  /* A new read is starting; discard any previous snapshot
	     before building the list of osdata types.  */
	  if (len_avail != -1 && len_avail != 0)
	    obstack_free (&obstack, NULL);
	  len_avail = 0;
	  buf = NULL;
	  obstack_init (&obstack);
	  obstack_grow_str (&obstack, "<osdata type=\"types\">\n");

	  obstack_xml_printf (&obstack,
			      "<item>"
			      "<column name=\"Type\">processes</column>"
			      "<column name=\"Description\">"
			      "Listing of all processes</column>"
			      "</item>");

	  obstack_grow_str0 (&obstack, "</osdata>\n");
	  buf = obstack_finish (&obstack);
	  len_avail = strlen (buf);
	}

      if (offset >= len_avail)
	{
	  /* Done.  Get rid of the obstack.  */
	  obstack_free (&obstack, NULL);
	  buf = NULL;
	  len_avail = 0;
	  return 0;
	}

      /* Serve the next slice of the snapshot.  */
      if (len > len_avail - offset)
	len = len_avail - offset;
      memcpy (readbuf, buf + offset, len);

      return len;
    }

  if (strcmp (annex, "processes") != 0)
    return 0;

  gdb_assert (readbuf && !writebuf);

  if (offset == 0)
    {
      /* A new read is starting; rebuild the process-list snapshot.  */
      if (len_avail != -1 && len_avail != 0)
	obstack_free (&obstack, NULL);
      len_avail = 0;
      buf = NULL;
      obstack_init (&obstack);
      obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");

      dirp = opendir ("/proc");
      if (dirp)
	{
	  struct dirent *dp;

	  while ((dp = readdir (dirp)) != NULL)
	    {
	      struct stat statbuf;
	      char procentry[sizeof ("/proc/4294967295")];

	      /* Only all-numeric /proc entries that fit a 32-bit pid
		 can be process directories.  */
	      if (!isdigit (dp->d_name[0])
		  || NAMELEN (dp) > sizeof ("4294967295") - 1)
		continue;

	      sprintf (procentry, "/proc/%s", dp->d_name);
	      if (stat (procentry, &statbuf) == 0
		  && S_ISDIR (statbuf.st_mode))
		{
		  char *pathname;
		  FILE *f;
		  char cmd[MAXPATHLEN + 1];
		  struct passwd *entry;

		  pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
		  entry = getpwuid (statbuf.st_uid);

		  if ((f = fopen (pathname, "r")) != NULL)
		    {
		      size_t length = fread (cmd, 1, sizeof (cmd) - 1, f);

		      if (length > 0)
			{
			  int i;

			  /* cmdline separates arguments with NULs;
			     turn them into spaces for display.  */
			  for (i = 0; i < length; i++)
			    if (cmd[i] == '\0')
			      cmd[i] = ' ';
			  cmd[length] = '\0';

			  obstack_xml_printf (
			    &obstack,
			    "<item>"
			    "<column name=\"pid\">%s</column>"
			    "<column name=\"user\">%s</column>"
			    "<column name=\"command\">%s</column>"
			    "</item>",
			    dp->d_name,
			    entry ? entry->pw_name : "?",
			    cmd);
			}
		      fclose (f);
		    }

		  xfree (pathname);
		}
	    }

	  closedir (dirp);
	}

      obstack_grow_str0 (&obstack, "</osdata>\n");
      buf = obstack_finish (&obstack);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the obstack.  */
      obstack_free (&obstack, NULL);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  /* Serve the next slice of the snapshot.  */
  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
5215 
5216 static LONGEST
5217 linux_xfer_partial (struct target_ops *ops, enum target_object object,
5218                     const char *annex, gdb_byte *readbuf,
5219 		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5220 {
5221   LONGEST xfer;
5222 
5223   if (object == TARGET_OBJECT_AUXV)
5224     return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
5225 			     offset, len);
5226 
5227   if (object == TARGET_OBJECT_OSDATA)
5228     return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5229                                offset, len);
5230 
5231   if (object == TARGET_OBJECT_SPU)
5232     return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5233 				offset, len);
5234 
5235   /* GDB calculates all the addresses in possibly larget width of the address.
5236      Address width needs to be masked before its final use - either by
5237      linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5238 
5239      Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
5240 
5241   if (object == TARGET_OBJECT_MEMORY)
5242     {
5243       int addr_bit = gdbarch_addr_bit (target_gdbarch);
5244 
5245       if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5246 	offset &= ((ULONGEST) 1 << addr_bit) - 1;
5247     }
5248 
5249   xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5250 				  offset, len);
5251   if (xfer != 0)
5252     return xfer;
5253 
5254   return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5255 			     offset, len);
5256 }
5257 
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

/* Install the Linux-specific methods on target vector T.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Save the inherited to_xfer_partial BEFORE overriding it, so
     linux_xfer_partial can chain to it as a fallback.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
}
5281 
/* Return a generic GNU/Linux target vector, built on the plain
   ptrace target with the Linux methods installed.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *ops = inf_ptrace_target ();

  linux_target_install_ops (ops);
  return ops;
}
5292 
5293 struct target_ops *
5294 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
5295 {
5296   struct target_ops *t;
5297 
5298   t = inf_ptrace_trad_target (register_u_offset);
5299   linux_target_install_ops (t);
5300 
5301   return t;
5302 }
5303 
5304 /* target_is_async_p implementation.  */
5305 
5306 static int
5307 linux_nat_is_async_p (void)
5308 {
5309   /* NOTE: palves 2008-03-21: We're only async when the user requests
5310      it explicitly with the "set target-async" command.
5311      Someday, linux will always be async.  */
5312   if (!target_async_permitted)
5313     return 0;
5314 
5315   /* See target.h/target_async_mask.  */
5316   return linux_nat_async_mask_value;
5317 }
5318 
5319 /* target_can_async_p implementation.  */
5320 
5321 static int
5322 linux_nat_can_async_p (void)
5323 {
5324   /* NOTE: palves 2008-03-21: We're only async when the user requests
5325      it explicitly with the "set target-async" command.
5326      Someday, linux will always be async.  */
5327   if (!target_async_permitted)
5328     return 0;
5329 
5330   /* See target.h/target_async_mask.  */
5331   return linux_nat_async_mask_value;
5332 }
5333 
/* to_supports_non_stop implementation: the Linux native target
   always supports non-stop mode.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
5339 
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* to_supports_multi_process implementation: report the value of the
   linux_multi_process knob above.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
5350 
/* target_async_mask implementation.  Set the async mask to NEW_MASK
   (0 = masked, async disabled; non-zero = unmasked) and return the
   previous mask value.  */

static int
linux_nat_async_mask (int new_mask)
{
  int curr_mask = linux_nat_async_mask_value;

  if (curr_mask != new_mask)
    {
      if (new_mask == 0)
	{
	  /* Masking: tear down async first, while the mask still
	     permits calling linux_nat_async.  */
	  linux_nat_async (NULL, 0);
	  linux_nat_async_mask_value = new_mask;
	}
      else
	{
	  /* Unmasking: set the mask before re-enabling async, since
	     linux_nat_async checks it.  */
	  linux_nat_async_mask_value = new_mask;

	  /* If we're going out of async-mask in all-stop, then the
	     inferior is stopped.  The next resume will call
	     target_async.  In non-stop, the target event source
	     should be always registered in the event loop.  Do so
	     now.  */
	  if (non_stop)
	    linux_nat_async (inferior_event_handler, 0);
	}
    }

  return curr_mask;
}
5381 
/* Non-zero while GDB owns the terminal as far as the async
   event-loop bookkeeping below is concerned.  */
static int async_terminal_is_ours = 1;

/* target_terminal_inferior implementation.  Hand the terminal to the
   inferior; in async mode, also stop listening for stdin commands
   and arrange for SIGINT forwarding.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  terminal_inferior ();

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop reading commands from stdin while the inferior owns the
     terminal, and trap SIGINT so it reaches the inferior.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
5406 
5407 /* target_terminal_ours implementation.  */
5408 
5409 static void
5410 linux_nat_terminal_ours (void)
5411 {
5412   if (!target_is_async_p ())
5413     {
5414       /* Async mode is disabled.  */
5415       terminal_ours ();
5416       return;
5417     }
5418 
5419   /* GDB should never give the terminal to the inferior if the
5420      inferior is running in the background (run&, continue&, etc.),
5421      but claiming it sure should.  */
5422   terminal_ours ();
5423 
5424   if (async_terminal_is_ours)
5425     return;
5426 
5427   clear_sigint_trap ();
5428   add_file_handler (input_fd, stdin_event_handler, 0);
5429   async_terminal_is_ours = 1;
5430 }
5431 
/* The client callback and context registered via linux_nat_async;
   invoked from handle_target_event when the event pipe fires.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;

/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  /* NOTE(review): fprintf from a signal handler is not
     async-signal-safe; tolerated here as debug-only output.  */
  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "sigchld\n");

  /* Only mark the pipe if async mode is active (the pipe exists).  */
  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  /* Restore errno so the interrupted code is unaffected.  */
  errno = old_errno;
}
5456 
/* Callback registered with the target events file descriptor.
   ERROR and CLIENT_DATA are ignored; simply forward an INF_REG_EVENT
   to the client callback registered by linux_nat_async.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}
5464 
5465 /* Create/destroy the target events pipe.  Returns previous state.  */
5466 
5467 static int
5468 linux_async_pipe (int enable)
5469 {
5470   int previous = (linux_nat_event_pipe[0] != -1);
5471 
5472   if (previous != enable)
5473     {
5474       sigset_t prev_mask;
5475 
5476       block_child_signals (&prev_mask);
5477 
5478       if (enable)
5479 	{
5480 	  if (pipe (linux_nat_event_pipe) == -1)
5481 	    internal_error (__FILE__, __LINE__,
5482 			    "creating event pipe failed.");
5483 
5484 	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5485 	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5486 	}
5487       else
5488 	{
5489 	  close (linux_nat_event_pipe[0]);
5490 	  close (linux_nat_event_pipe[1]);
5491 	  linux_nat_event_pipe[0] = -1;
5492 	  linux_nat_event_pipe[1] = -1;
5493 	}
5494 
5495       restore_child_signals_mask (&prev_mask);
5496     }
5497 
5498   return previous;
5499 }
5500 
5501 /* target_async implementation.  */
5502 
5503 static void
5504 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5505 				   void *context), void *context)
5506 {
5507   if (linux_nat_async_mask_value == 0 || !target_async_permitted)
5508     internal_error (__FILE__, __LINE__,
5509 		    "Calling target_async when async is masked");
5510 
5511   if (callback != NULL)
5512     {
5513       async_client_callback = callback;
5514       async_client_context = context;
5515       if (!linux_async_pipe (1))
5516 	{
5517 	  add_file_handler (linux_nat_event_pipe[0],
5518 			    handle_target_event, NULL);
5519 	  /* There may be pending events to handle.  Tell the event loop
5520 	     to poll them.  */
5521 	  async_file_mark ();
5522 	}
5523     }
5524   else
5525     {
5526       async_client_callback = callback;
5527       async_client_context = context;
5528       delete_file_handler (linux_nat_event_pipe[0]);
5529       linux_async_pipe (0);
5530     }
5531   return;
5532 }
5533 
/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
   event came out.  Callback for iterate_over_lwps; DATA is unused.
   Always returns 0 so that iteration continues over the remaining
   LWPs.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      ptid_t ptid = lwp->ptid;

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));


      stop_callback (lwp, NULL);
      stop_wait_callback (lwp, NULL);

      /* If the lwp exits while we try to stop it, there's nothing
	 else to do.  Re-look it up by the saved ptid, since
	 stop_wait_callback may have deleted it.  */
      lwp = find_lwp_pid (ptid);
      if (lwp == NULL)
	return 0;

      /* If we didn't collect any signal other than SIGSTOP while
	 stopping the LWP, push a SIGNAL_0 event.  In either case, the
	 event-loop will end up calling target_wait which will collect
	 these.  */
      if (lwp->status == 0)
	lwp->status = W_STOPCODE (0);
      async_file_mark ();
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}
5586 
5587 static void
5588 linux_nat_stop (ptid_t ptid)
5589 {
5590   if (non_stop)
5591     iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5592   else
5593     linux_ops->to_stop (ptid);
5594 }
5595 
5596 static void
5597 linux_nat_close (int quitting)
5598 {
5599   /* Unregister from the event loop.  */
5600   if (target_is_async_p ())
5601     target_async (NULL, 0);
5602 
5603   /* Reset the async_masking.  */
5604   linux_nat_async_mask_value = 1;
5605 
5606   if (linux_ops->to_close)
5607     linux_ops->to_close (quitting);
5608 }
5609 
5610 /* When requests are passed down from the linux-nat layer to the
5611    single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5612    used.  The address space pointer is stored in the inferior object,
5613    but the common code that is passed such ptid can't tell whether
5614    lwpid is a "main" process id or not (it assumes so).  We reverse
5615    look up the "main" process id from the lwp here.  */
5616 
5617 struct address_space *
5618 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5619 {
5620   struct lwp_info *lwp;
5621   struct inferior *inf;
5622   int pid;
5623 
5624   pid = GET_LWP (ptid);
5625   if (GET_LWP (ptid) == 0)
5626     {
5627       /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
5628 	 tgid.  */
5629       lwp = find_lwp_pid (ptid);
5630       pid = GET_PID (lwp->ptid);
5631     }
5632   else
5633     {
5634       /* A (pid,lwpid,0) ptid.  */
5635       pid = GET_PID (ptid);
5636     }
5637 
5638   inf = find_inferior_pid (pid);
5639   gdb_assert (inf != NULL);
5640   return inf->aspace;
5641 }
5642 
5643 int
5644 linux_nat_core_of_thread_1 (ptid_t ptid)
5645 {
5646   struct cleanup *back_to;
5647   char *filename;
5648   FILE *f;
5649   char *content = NULL;
5650   char *p;
5651   char *ts = 0;
5652   int content_read = 0;
5653   int i;
5654   int core;
5655 
5656   filename = xstrprintf ("/proc/%d/task/%ld/stat",
5657 			 GET_PID (ptid), GET_LWP (ptid));
5658   back_to = make_cleanup (xfree, filename);
5659 
5660   f = fopen (filename, "r");
5661   if (!f)
5662     {
5663       do_cleanups (back_to);
5664       return -1;
5665     }
5666 
5667   make_cleanup_fclose (f);
5668 
5669   for (;;)
5670     {
5671       int n;
5672 
5673       content = xrealloc (content, content_read + 1024);
5674       n = fread (content + content_read, 1, 1024, f);
5675       content_read += n;
5676       if (n < 1024)
5677 	{
5678 	  content[content_read] = '\0';
5679 	  break;
5680 	}
5681     }
5682 
5683   make_cleanup (xfree, content);
5684 
5685   p = strchr (content, '(');
5686 
5687   /* Skip ")".  */
5688   if (p != NULL)
5689     p = strchr (p, ')');
5690   if (p != NULL)
5691     p++;
5692 
5693   /* If the first field after program name has index 0, then core number is
5694      the field with index 36.  There's no constant for that anywhere.  */
5695   if (p != NULL)
5696     p = strtok_r (p, " ", &ts);
5697   for (i = 0; p != NULL && i != 36; ++i)
5698     p = strtok_r (NULL, " ", &ts);
5699 
5700   if (p == NULL || sscanf (p, "%d", &core) == 0)
5701     core = -1;
5702 
5703   do_cleanups (back_to);
5704 
5705   return core;
5706 }
5707 
5708 /* Return the cached value of the processor core for thread PTID.  */
5709 
5710 int
5711 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5712 {
5713   struct lwp_info *info = find_lwp_pid (ptid);
5714 
5715   if (info)
5716     return info->core;
5717   return -1;
5718 }
5719 
/* Complete target vector T with the multi-threaded GNU/Linux methods
   and register it with GDB's target list.  Must be called after the
   single-threaded layer has filled in T.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  NOTE: this copy must happen before any of
     the overrides below.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Methods for async/event-loop support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_async_mask = linux_nat_async_mask;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
5771 
5772 /* Register a method to call whenever a new thread is attached.  */
5773 void
5774 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5775 {
5776   /* Save the pointer.  We only support a single registered instance
5777      of the GNU/Linux native target, so we do not need to map this to
5778      T.  */
5779   linux_nat_new_thread = new_thread;
5780 }
5781 
5782 /* Register a method that converts a siginfo object between the layout
5783    that ptrace returns, and the layout in the architecture of the
5784    inferior.  */
5785 void
5786 linux_nat_set_siginfo_fixup (struct target_ops *t,
5787 			     int (*siginfo_fixup) (struct siginfo *,
5788 						   gdb_byte *,
5789 						   int))
5790 {
5791   /* Save the pointer.  */
5792   linux_nat_siginfo_fixup = siginfo_fixup;
5793 }
5794 
5795 /* Return the saved siginfo associated with PTID.  */
5796 struct siginfo *
5797 linux_nat_get_siginfo (ptid_t ptid)
5798 {
5799   struct lwp_info *lp = find_lwp_pid (ptid);
5800 
5801   gdb_assert (lp != NULL);
5802 
5803   return &lp->siginfo;
5804 }
5805 
5806 /* Provide a prototype to silence -Wmissing-prototypes.  */
5807 extern initialize_file_ftype _initialize_linux_nat;
5808 
/* Module initializer: registers the "info proc" command and the
   debug/maintenance knobs, and installs the SIGCHLD machinery used to
   learn about inferior state changes.  Called automatically by GDB's
   init framework at startup.  */
void
_initialize_linux_nat (void)
{
  add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
  mappings -- list of mapped memory regions.\n\
  stat     -- list a bunch of random process info.\n\
  status   -- list a different bunch of random process info.\n\
  all      -- list all available /proc info."));

  /* "set debug lin-lwp": tracing of this module's synchronous path.  */
  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
			    &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);

  /* "set debug lin-lwp-async": tracing of the async (event-loop) path.  */
  add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
			    &debug_linux_nat_async, _("\
Set debugging of GNU/Linux async lwp module."), _("\
Show debugging of GNU/Linux async lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat_async,
			    &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  SA_RESTART keeps interrupted slow
     syscalls from failing with EINTR when a child changes state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  Start from
     the current mask so other blocked signals stay blocked.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* No signals are blocked by this module yet.  */
  sigemptyset (&blocked_mask);

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);
}
5867 
5868 
5869 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5870    the GNU/Linux Threads library and therefore doesn't really belong
5871    here.  */
5872 
5873 /* Read variable NAME in the target and return its value if found.
5874    Otherwise return zero.  It is assumed that the type of the variable
5875    is `int'.  */
5876 
5877 static int
5878 get_signo (const char *name)
5879 {
5880   struct minimal_symbol *ms;
5881   int signo;
5882 
5883   ms = lookup_minimal_symbol (name, NULL, NULL);
5884   if (ms == NULL)
5885     return 0;
5886 
5887   if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5888 			  sizeof (signo)) != 0)
5889     return 0;
5890 
5891   return signo;
5892 }
5893 
5894 /* Return the set of signals used by the threads library in *SET.  */
5895 
5896 void
5897 lin_thread_get_thread_signals (sigset_t *set)
5898 {
5899   struct sigaction action;
5900   int restart, cancel;
5901 
5902   sigemptyset (&blocked_mask);
5903   sigemptyset (set);
5904 
5905   restart = get_signo ("__pthread_sig_restart");
5906   cancel = get_signo ("__pthread_sig_cancel");
5907 
5908   /* LinuxThreads normally uses the first two RT signals, but in some legacy
5909      cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
5910      not provide any way for the debugger to query the signal numbers -
5911      fortunately they don't change!  */
5912 
5913   if (restart == 0)
5914     restart = __SIGRTMIN;
5915 
5916   if (cancel == 0)
5917     cancel = __SIGRTMIN + 1;
5918 
5919   sigaddset (set, restart);
5920   sigaddset (set, cancel);
5921 
5922   /* The GNU/Linux Threads library makes terminating threads send a
5923      special "cancel" signal instead of SIGCHLD.  Make sure we catch
5924      those (to prevent them from terminating GDB itself, which is
5925      likely to be their default action) and treat them the same way as
5926      SIGCHLD.  */
5927 
5928   action.sa_handler = sigchld_handler;
5929   sigemptyset (&action.sa_mask);
5930   action.sa_flags = SA_RESTART;
5931   sigaction (cancel, &action, NULL);
5932 
5933   /* We block the "cancel" signal throughout this code ...  */
5934   sigaddset (&blocked_mask, cancel);
5935   sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5936 
5937   /* ... except during a sigsuspend.  */
5938   sigdelset (&suspend_mask, cancel);
5939 }
5940