xref: /dflybsd-src/contrib/gdb-7/gdb/solib-svr4.c (revision f41d807a0c7c535d8f66f0593fb6e95fa20f82d4)
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2 
3    Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4    2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5    Free Software Foundation, Inc.
6 
7    This file is part of GDB.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 
22 #include "defs.h"
23 
24 #include "elf/external.h"
25 #include "elf/common.h"
26 #include "elf/mips.h"
27 
28 #include "symtab.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdbcore.h"
33 #include "target.h"
34 #include "inferior.h"
35 #include "regcache.h"
36 #include "gdbthread.h"
37 #include "observer.h"
38 
39 #include "gdb_assert.h"
40 
41 #include "solist.h"
42 #include "solib.h"
43 #include "solib-svr4.h"
44 
45 #include "bfd-target.h"
46 #include "elf-bfd.h"
47 #include "exec.h"
48 #include "auxv.h"
49 #include "exceptions.h"
50 
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54 
55 /* Link map info to include in an allocated so_list entry */
56 
57 struct lm_info
58   {
59     /* Pointer to copy of link map from inferior.  The type is char *
60        rather than void *, so that we may use byte offsets to find the
61        various fields without the need for a cast.  */
62     gdb_byte *lm;
63 
64     /* Amount by which addresses in the binary should be relocated to
65        match the inferior.  This could most often be taken directly
66        from lm, but when prelinking is involved and the prelink base
67        address changes, we may need a different offset, we want to
68        warn about the difference and compute it only once.  */
69     CORE_ADDR l_addr;
70 
71     /* The target location of lm.  */
72     CORE_ADDR lm_addr;
73   };
74 
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76    GDB can try to place a breakpoint to monitor shared library
77    events.
78 
79    If none of these symbols are found, or other errors occur, then
80    SVR4 systems will fall back to using a symbol as the "startup
81    mapping complete" breakpoint address.  */
82 
83 static char *solib_break_names[] =
84 {
85   "r_debug_state",
86   "_r_debug_state",
87   "_dl_debug_state",
88   "rtld_db_dlactivity",
89   "__dl_rtld_db_dlactivity",
90   "_rtld_debug_state",
91 
92   NULL
93 };
94 
95 static char *bkpt_names[] =
96 {
97   "_start",
98   "__start",
99   "main",
100   NULL
101 };
102 
103 static char *main_name_list[] =
104 {
105   "main_$main",
106   NULL
107 };
108 
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110    the same shared library.  */
111 
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115   if (strcmp (gdb_so_name, inferior_so_name) == 0)
116     return 1;
117 
118   /* On Solaris, when starting inferior we think that dynamic linker is
119      /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
120      contains /lib/ld.so.1.  Sometimes one file is a link to another, but
121      sometimes they have identical content, but are not linked to each
122      other.  We don't restrict this check for Solaris, but the chances
123      of running into this situation elsewhere are very low.  */
124   if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125       && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126     return 1;
127 
128   /* Similarly, we observed the same issue with sparc64, but with
129      different locations.  */
130   if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131       && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132     return 1;
133 
134   return 0;
135 }
136 
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140   return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142 
143 /* link map access functions */
144 
145 static CORE_ADDR
146 LM_ADDR_FROM_LINK_MAP (struct so_list *so)
147 {
148   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
150 
151   return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
152 				ptr_type);
153 }
154 
155 static int
156 HAS_LM_DYNAMIC_FROM_LINK_MAP (void)
157 {
158   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
159 
160   return lmo->l_ld_offset >= 0;
161 }
162 
163 static CORE_ADDR
164 LM_DYNAMIC_FROM_LINK_MAP (struct so_list *so)
165 {
166   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
168 
169   return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
170 				ptr_type);
171 }
172 
173 static CORE_ADDR
174 LM_ADDR_CHECK (struct so_list *so, bfd *abfd)
175 {
176   if (so->lm_info->l_addr == (CORE_ADDR)-1)
177     {
178       struct bfd_section *dyninfo_sect;
179       CORE_ADDR l_addr, l_dynaddr, dynaddr;
180 
181       l_addr = LM_ADDR_FROM_LINK_MAP (so);
182 
183       if (! abfd || ! HAS_LM_DYNAMIC_FROM_LINK_MAP ())
184 	goto set_addr;
185 
186       l_dynaddr = LM_DYNAMIC_FROM_LINK_MAP (so);
187 
188       dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189       if (dyninfo_sect == NULL)
190 	goto set_addr;
191 
192       dynaddr = bfd_section_vma (abfd, dyninfo_sect);
193 
194       if (dynaddr + l_addr != l_dynaddr)
195 	{
196 	  CORE_ADDR align = 0x1000;
197 	  CORE_ADDR minpagesize = align;
198 
199 	  if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
200 	    {
201 	      Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202 	      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
203 	      int i;
204 
205 	      align = 1;
206 
207 	      for (i = 0; i < ehdr->e_phnum; i++)
208 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209 		  align = phdr[i].p_align;
210 
211 	      minpagesize = get_elf_backend_data (abfd)->minpagesize;
212 	    }
213 
214 	  /* Turn it into a mask.  */
215 	  align--;
216 
217 	  /* If the changes match the alignment requirements, we
218 	     assume we're using a core file that was generated by the
219 	     same binary, just prelinked with a different base offset.
220 	     If it doesn't match, we may have a different binary, the
221 	     same binary with the dynamic table loaded at an unrelated
222 	     location, or anything, really.  To avoid regressions,
223 	     don't adjust the base offset in the latter case, although
224 	     odds are that, if things really changed, debugging won't
225 	     quite work.
226 
227 	     One could expect more the condition
228 	       ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229 	     but the one below is relaxed for PPC.  The PPC kernel supports
230 	     either 4k or 64k page sizes.  To be prepared for 64k pages,
231 	     PPC ELF files are built using an alignment requirement of 64k.
232 	     However, when running on a kernel supporting 4k pages, the memory
233 	     mapping of the library may not actually happen on a 64k boundary!
234 
235 	     (In the usual case where (l_addr & align) == 0, this check is
236 	     equivalent to the possibly expected check above.)
237 
238 	     Even on PPC it must be zero-aligned at least for MINPAGESIZE.  */
239 
240 	  if ((l_addr & (minpagesize - 1)) == 0
241 	      && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
242 	    {
243 	      l_addr = l_dynaddr - dynaddr;
244 
245 	      if (info_verbose)
246 		printf_unfiltered (_("Using PIC (Position Independent Code) "
247 				     "prelink displacement %s for \"%s\".\n"),
248 				   paddress (target_gdbarch, l_addr),
249 				   so->so_name);
250 	    }
251 	  else
252 	    warning (_(".dynamic section for \"%s\" "
253 		       "is not at the expected address "
254 		       "(wrong library or version mismatch?)"), so->so_name);
255 	}
256 
257     set_addr:
258       so->lm_info->l_addr = l_addr;
259     }
260 
261   return so->lm_info->l_addr;
262 }
263 
264 static CORE_ADDR
265 LM_NEXT (struct so_list *so)
266 {
267   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
268   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
269 
270   return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
271 				ptr_type);
272 }
273 
274 static CORE_ADDR
275 LM_PREV (struct so_list *so)
276 {
277   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
278   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
279 
280   return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
281 				ptr_type);
282 }
283 
284 static CORE_ADDR
285 LM_NAME (struct so_list *so)
286 {
287   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
288   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
289 
290   return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
291 				ptr_type);
292 }
293 
294 static int
295 IGNORE_FIRST_LINK_MAP_ENTRY (struct so_list *so)
296 {
297   /* Assume that everything is a library if the dynamic loader was loaded
298      late by a static executable.  */
299   if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
300     return 0;
301 
302   return LM_PREV (so) == 0;
303 }
304 
305 /* Per pspace SVR4 specific data.  */
306 
307 struct svr4_info
308 {
309   CORE_ADDR debug_base;	/* Base of dynamic linker structures */
310 
311   /* Validity flag for debug_loader_offset.  */
312   int debug_loader_offset_p;
313 
314   /* Load address for the dynamic linker, inferred.  */
315   CORE_ADDR debug_loader_offset;
316 
317   /* Name of the dynamic linker, valid if debug_loader_offset_p.  */
318   char *debug_loader_name;
319 
320   /* Load map address for the main executable.  */
321   CORE_ADDR main_lm_addr;
322 
323   CORE_ADDR interp_text_sect_low;
324   CORE_ADDR interp_text_sect_high;
325   CORE_ADDR interp_plt_sect_low;
326   CORE_ADDR interp_plt_sect_high;
327 };
328 
329 /* Per-program-space data key.  */
330 static const struct program_space_data *solib_svr4_pspace_data;
331 
332 static void
333 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
334 {
335   struct svr4_info *info;
336 
337   info = program_space_data (pspace, solib_svr4_pspace_data);
338   xfree (info);
339 }
340 
341 /* Get the current svr4 data.  If none is found yet, add it now.  This
342    function always returns a valid object.  */
343 
344 static struct svr4_info *
345 get_svr4_info (void)
346 {
347   struct svr4_info *info;
348 
349   info = program_space_data (current_program_space, solib_svr4_pspace_data);
350   if (info != NULL)
351     return info;
352 
353   info = XZALLOC (struct svr4_info);
354   set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
355   return info;
356 }
357 
358 /* Local function prototypes */
359 
360 static int match_main (char *);
361 
362 static CORE_ADDR bfd_lookup_symbol (bfd *, char *);
363 
364 /*
365 
366    LOCAL FUNCTION
367 
368    bfd_lookup_symbol -- lookup the value for a specific symbol
369 
370    SYNOPSIS
371 
372    CORE_ADDR bfd_lookup_symbol (bfd *abfd, char *symname)
373 
374    DESCRIPTION
375 
376    An expensive way to lookup the value of a single symbol for
377    bfd's that are only temporary anyway.  This is used by the
378    shared library support to find the address of the debugger
379    notification routine in the shared library.
380 
381    The returned symbol may be in a code or data section; functions
382    will normally be in a code section, but may be in a data section
383    if this architecture uses function descriptors.
384 
385    Note that 0 is specifically allowed as an error return (no
386    such symbol).
387  */
388 
389 static CORE_ADDR
390 bfd_lookup_symbol (bfd *abfd, char *symname)
391 {
392   long storage_needed;
393   asymbol *sym;
394   asymbol **symbol_table;
395   unsigned int number_of_symbols;
396   unsigned int i;
397   struct cleanup *back_to;
398   CORE_ADDR symaddr = 0;
399 
400   storage_needed = bfd_get_symtab_upper_bound (abfd);
401 
402   if (storage_needed > 0)
403     {
404       symbol_table = (asymbol **) xmalloc (storage_needed);
405       back_to = make_cleanup (xfree, symbol_table);
406       number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
407 
408       for (i = 0; i < number_of_symbols; i++)
409 	{
410 	  sym = *symbol_table++;
411 	  if (strcmp (sym->name, symname) == 0
412               && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
413 	    {
414 	      /* BFD symbols are section relative.  */
415 	      symaddr = sym->value + sym->section->vma;
416 	      break;
417 	    }
418 	}
419       do_cleanups (back_to);
420     }
421 
422   if (symaddr)
423     return symaddr;
424 
425   /* On FreeBSD, the dynamic linker is stripped by default.  So we'll
426      have to check the dynamic string table too.  */
427 
428   storage_needed = bfd_get_dynamic_symtab_upper_bound (abfd);
429 
430   if (storage_needed > 0)
431     {
432       symbol_table = (asymbol **) xmalloc (storage_needed);
433       back_to = make_cleanup (xfree, symbol_table);
434       number_of_symbols = bfd_canonicalize_dynamic_symtab (abfd, symbol_table);
435 
436       for (i = 0; i < number_of_symbols; i++)
437 	{
438 	  sym = *symbol_table++;
439 
440 	  if (strcmp (sym->name, symname) == 0
441               && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
442 	    {
443 	      /* BFD symbols are section relative.  */
444 	      symaddr = sym->value + sym->section->vma;
445 	      break;
446 	    }
447 	}
448       do_cleanups (back_to);
449     }
450 
451   return symaddr;
452 }
453 
454 
455 /* Read program header TYPE from inferior memory.  The header is found
456    by scanning the OS auxillary vector.
457 
458    If TYPE == -1, return the program headers instead of the contents of
459    one program header.
460 
461    Return a pointer to allocated memory holding the program header contents,
462    or NULL on failure.  If sucessful, and unless P_SECT_SIZE is NULL, the
463    size of those contents is returned to P_SECT_SIZE.  Likewise, the target
464    architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE.  */
465 
466 static gdb_byte *
467 read_program_header (int type, int *p_sect_size, int *p_arch_size)
468 {
469   enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
470   CORE_ADDR at_phdr, at_phent, at_phnum;
471   int arch_size, sect_size;
472   CORE_ADDR sect_addr;
473   gdb_byte *buf;
474 
475   /* Get required auxv elements from target.  */
476   if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
477     return 0;
478   if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
479     return 0;
480   if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
481     return 0;
482   if (!at_phdr || !at_phnum)
483     return 0;
484 
485   /* Determine ELF architecture type.  */
486   if (at_phent == sizeof (Elf32_External_Phdr))
487     arch_size = 32;
488   else if (at_phent == sizeof (Elf64_External_Phdr))
489     arch_size = 64;
490   else
491     return 0;
492 
493   /* Find the requested segment.  */
494   if (type == -1)
495     {
496       sect_addr = at_phdr;
497       sect_size = at_phent * at_phnum;
498     }
499   else if (arch_size == 32)
500     {
501       Elf32_External_Phdr phdr;
502       int i;
503 
504       /* Search for requested PHDR.  */
505       for (i = 0; i < at_phnum; i++)
506 	{
507 	  if (target_read_memory (at_phdr + i * sizeof (phdr),
508 				  (gdb_byte *)&phdr, sizeof (phdr)))
509 	    return 0;
510 
511 	  if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
512 					4, byte_order) == type)
513 	    break;
514 	}
515 
516       if (i == at_phnum)
517 	return 0;
518 
519       /* Retrieve address and size.  */
520       sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
521 					    4, byte_order);
522       sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
523 					    4, byte_order);
524     }
525   else
526     {
527       Elf64_External_Phdr phdr;
528       int i;
529 
530       /* Search for requested PHDR.  */
531       for (i = 0; i < at_phnum; i++)
532 	{
533 	  if (target_read_memory (at_phdr + i * sizeof (phdr),
534 				  (gdb_byte *)&phdr, sizeof (phdr)))
535 	    return 0;
536 
537 	  if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
538 					4, byte_order) == type)
539 	    break;
540 	}
541 
542       if (i == at_phnum)
543 	return 0;
544 
545       /* Retrieve address and size.  */
546       sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
547 					    8, byte_order);
548       sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
549 					    8, byte_order);
550     }
551 
552   /* Read in requested program header.  */
553   buf = xmalloc (sect_size);
554   if (target_read_memory (sect_addr, buf, sect_size))
555     {
556       xfree (buf);
557       return NULL;
558     }
559 
560   if (p_arch_size)
561     *p_arch_size = arch_size;
562   if (p_sect_size)
563     *p_sect_size = sect_size;
564 
565   return buf;
566 }
567 
568 
569 /* Return program interpreter string.  */
570 static gdb_byte *
571 find_program_interpreter (void)
572 {
573   gdb_byte *buf = NULL;
574 
575   /* If we have an exec_bfd, use its section table.  */
576   if (exec_bfd
577       && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
578    {
579      struct bfd_section *interp_sect;
580 
581      interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
582      if (interp_sect != NULL)
583       {
584 	int sect_size = bfd_section_size (exec_bfd, interp_sect);
585 
586 	buf = xmalloc (sect_size);
587 	bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
588       }
589    }
590 
591   /* If we didn't find it, use the target auxillary vector.  */
592   if (!buf)
593     buf = read_program_header (PT_INTERP, NULL, NULL);
594 
595   return buf;
596 }
597 
598 
599 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
600    returned and the corresponding PTR is set.  */
601 
602 static int
603 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
604 {
605   int arch_size, step, sect_size;
606   long dyn_tag;
607   CORE_ADDR dyn_ptr, dyn_addr;
608   gdb_byte *bufend, *bufstart, *buf;
609   Elf32_External_Dyn *x_dynp_32;
610   Elf64_External_Dyn *x_dynp_64;
611   struct bfd_section *sect;
612   struct target_section *target_section;
613 
614   if (abfd == NULL)
615     return 0;
616 
617   if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
618     return 0;
619 
620   arch_size = bfd_get_arch_size (abfd);
621   if (arch_size == -1)
622     return 0;
623 
624   /* Find the start address of the .dynamic section.  */
625   sect = bfd_get_section_by_name (abfd, ".dynamic");
626   if (sect == NULL)
627     return 0;
628 
629   for (target_section = current_target_sections->sections;
630        target_section < current_target_sections->sections_end;
631        target_section++)
632     if (sect == target_section->the_bfd_section)
633       break;
634   if (target_section < current_target_sections->sections_end)
635     dyn_addr = target_section->addr;
636   else
637     {
638       /* ABFD may come from OBJFILE acting only as a symbol file without being
639 	 loaded into the target (see add_symbol_file_command).  This case is
640 	 such fallback to the file VMA address without the possibility of
641 	 having the section relocated to its actual in-memory address.  */
642 
643       dyn_addr = bfd_section_vma (abfd, sect);
644     }
645 
646   /* Read in .dynamic from the BFD.  We will get the actual value
647      from memory later.  */
648   sect_size = bfd_section_size (abfd, sect);
649   buf = bufstart = alloca (sect_size);
650   if (!bfd_get_section_contents (abfd, sect,
651 				 buf, 0, sect_size))
652     return 0;
653 
654   /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
655   step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
656 			   : sizeof (Elf64_External_Dyn);
657   for (bufend = buf + sect_size;
658        buf < bufend;
659        buf += step)
660   {
661     if (arch_size == 32)
662       {
663 	x_dynp_32 = (Elf32_External_Dyn *) buf;
664 	dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
665 	dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
666       }
667     else
668       {
669 	x_dynp_64 = (Elf64_External_Dyn *) buf;
670 	dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
671 	dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
672       }
673      if (dyn_tag == DT_NULL)
674        return 0;
675      if (dyn_tag == dyntag)
676        {
677 	 /* If requested, try to read the runtime value of this .dynamic
678 	    entry.  */
679 	 if (ptr)
680 	   {
681 	     struct type *ptr_type;
682 	     gdb_byte ptr_buf[8];
683 	     CORE_ADDR ptr_addr;
684 
685 	     ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
686 	     ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
687 	     if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
688 	       dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
689 	     *ptr = dyn_ptr;
690 	   }
691 	 return 1;
692        }
693   }
694 
695   return 0;
696 }
697 
698 /* Scan for DYNTAG in .dynamic section of the target's main executable,
699    found by consulting the OS auxillary vector.  If DYNTAG is found 1 is
700    returned and the corresponding PTR is set.  */
701 
702 static int
703 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
704 {
705   enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
706   int sect_size, arch_size, step;
707   long dyn_tag;
708   CORE_ADDR dyn_ptr;
709   gdb_byte *bufend, *bufstart, *buf;
710 
711   /* Read in .dynamic section.  */
712   buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
713   if (!buf)
714     return 0;
715 
716   /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
717   step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
718 			   : sizeof (Elf64_External_Dyn);
719   for (bufend = buf + sect_size;
720        buf < bufend;
721        buf += step)
722   {
723     if (arch_size == 32)
724       {
725 	Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
726 
727 	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
728 					    4, byte_order);
729 	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
730 					    4, byte_order);
731       }
732     else
733       {
734 	Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
735 
736 	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
737 					    8, byte_order);
738 	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
739 					    8, byte_order);
740       }
741     if (dyn_tag == DT_NULL)
742       break;
743 
744     if (dyn_tag == dyntag)
745       {
746 	if (ptr)
747 	  *ptr = dyn_ptr;
748 
749 	xfree (bufstart);
750 	return 1;
751       }
752   }
753 
754   xfree (bufstart);
755   return 0;
756 }
757 
758 
759 /*
760 
761    LOCAL FUNCTION
762 
763    elf_locate_base -- locate the base address of dynamic linker structs
764    for SVR4 elf targets.
765 
766    SYNOPSIS
767 
768    CORE_ADDR elf_locate_base (void)
769 
770    DESCRIPTION
771 
772    For SVR4 elf targets the address of the dynamic linker's runtime
773    structure is contained within the dynamic info section in the
774    executable file.  The dynamic section is also mapped into the
775    inferior address space.  Because the runtime loader fills in the
776    real address before starting the inferior, we have to read in the
777    dynamic info section from the inferior address space.
778    If there are any errors while trying to find the address, we
779    silently return 0, otherwise the found address is returned.
780 
781  */
782 
783 static CORE_ADDR
784 elf_locate_base (void)
785 {
786   struct minimal_symbol *msymbol;
787   CORE_ADDR dyn_ptr;
788 
789   /* Look for DT_MIPS_RLD_MAP first.  MIPS executables use this
790      instead of DT_DEBUG, although they sometimes contain an unused
791      DT_DEBUG.  */
792   if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
793       || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
794     {
795       struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
796       gdb_byte *pbuf;
797       int pbuf_size = TYPE_LENGTH (ptr_type);
798 
799       pbuf = alloca (pbuf_size);
800       /* DT_MIPS_RLD_MAP contains a pointer to the address
801 	 of the dynamic link structure.  */
802       if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
803 	return 0;
804       return extract_typed_address (pbuf, ptr_type);
805     }
806 
807   /* Find DT_DEBUG.  */
808   if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
809       || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
810     return dyn_ptr;
811 
812   /* This may be a static executable.  Look for the symbol
813      conventionally named _r_debug, as a last resort.  */
814   msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
815   if (msymbol != NULL)
816     return SYMBOL_VALUE_ADDRESS (msymbol);
817 
818   /* DT_DEBUG entry not found.  */
819   return 0;
820 }
821 
822 /*
823 
824    LOCAL FUNCTION
825 
826    locate_base -- locate the base address of dynamic linker structs
827 
828    SYNOPSIS
829 
830    CORE_ADDR locate_base (struct svr4_info *)
831 
832    DESCRIPTION
833 
834    For both the SunOS and SVR4 shared library implementations, if the
835    inferior executable has been linked dynamically, there is a single
836    address somewhere in the inferior's data space which is the key to
837    locating all of the dynamic linker's runtime structures.  This
838    address is the value of the debug base symbol.  The job of this
839    function is to find and return that address, or to return 0 if there
840    is no such address (the executable is statically linked for example).
841 
842    For SunOS, the job is almost trivial, since the dynamic linker and
843    all of it's structures are statically linked to the executable at
844    link time.  Thus the symbol for the address we are looking for has
845    already been added to the minimal symbol table for the executable's
846    objfile at the time the symbol file's symbols were read, and all we
847    have to do is look it up there.  Note that we explicitly do NOT want
848    to find the copies in the shared library.
849 
850    The SVR4 version is a bit more complicated because the address
851    is contained somewhere in the dynamic info section.  We have to go
852    to a lot more work to discover the address of the debug base symbol.
853    Because of this complexity, we cache the value we find and return that
854    value on subsequent invocations.  Note there is no copy in the
855    executable symbol tables.
856 
857  */
858 
859 static CORE_ADDR
860 locate_base (struct svr4_info *info)
861 {
862   /* Check to see if we have a currently valid address, and if so, avoid
863      doing all this work again and just return the cached address.  If
864      we have no cached address, try to locate it in the dynamic info
865      section for ELF executables.  There's no point in doing any of this
866      though if we don't have some link map offsets to work with.  */
867 
868   if (info->debug_base == 0 && svr4_have_link_map_offsets ())
869     info->debug_base = elf_locate_base ();
870   return info->debug_base;
871 }
872 
873 /* Find the first element in the inferior's dynamic link map, and
874    return its address in the inferior.  Return zero if the address
875    could not be determined.
876 
877    FIXME: Perhaps we should validate the info somehow, perhaps by
878    checking r_version for a known version number, or r_state for
879    RT_CONSISTENT.  */
880 
881 static CORE_ADDR
882 solib_svr4_r_map (struct svr4_info *info)
883 {
884   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
885   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
886   CORE_ADDR addr = 0;
887   volatile struct gdb_exception ex;
888 
889   TRY_CATCH (ex, RETURN_MASK_ERROR)
890     {
891       addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
892                                         ptr_type);
893     }
894   exception_print (gdb_stderr, ex);
895   return addr;
896 }
897 
898 /* Find r_brk from the inferior's debug base.  */
899 
900 static CORE_ADDR
901 solib_svr4_r_brk (struct svr4_info *info)
902 {
903   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
904   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
905 
906   return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
907 				    ptr_type);
908 }
909 
910 /* Find the link map for the dynamic linker (if it is not in the
911    normal list of loaded shared objects).  */
912 
913 static CORE_ADDR
914 solib_svr4_r_ldsomap (struct svr4_info *info)
915 {
916   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
917   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
918   enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
919   ULONGEST version;
920 
921   /* Check version, and return zero if `struct r_debug' doesn't have
922      the r_ldsomap member.  */
923   version
924     = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
925 				    lmo->r_version_size, byte_order);
926   if (version < 2 || lmo->r_ldsomap_offset == -1)
927     return 0;
928 
929   return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
930 				    ptr_type);
931 }
932 
933 /* On Solaris systems with some versions of the dynamic linker,
934    ld.so's l_name pointer points to the SONAME in the string table
935    rather than into writable memory.  So that GDB can find shared
936    libraries when loading a core file generated by gcore, ensure that
937    memory areas containing the l_name string are saved in the core
938    file.  */
939 
940 static int
941 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
942 {
943   struct svr4_info *info;
944   CORE_ADDR ldsomap;
945   struct so_list *new;
946   struct cleanup *old_chain;
947   struct link_map_offsets *lmo;
948   CORE_ADDR lm_name;
949 
950   info = get_svr4_info ();
951 
952   info->debug_base = 0;
953   locate_base (info);
954   if (!info->debug_base)
955     return 0;
956 
957   ldsomap = solib_svr4_r_ldsomap (info);
958   if (!ldsomap)
959     return 0;
960 
961   lmo = svr4_fetch_link_map_offsets ();
962   new = XZALLOC (struct so_list);
963   old_chain = make_cleanup (xfree, new);
964   new->lm_info = xmalloc (sizeof (struct lm_info));
965   make_cleanup (xfree, new->lm_info);
966   new->lm_info->l_addr = (CORE_ADDR)-1;
967   new->lm_info->lm_addr = ldsomap;
968   new->lm_info->lm = xzalloc (lmo->link_map_size);
969   make_cleanup (xfree, new->lm_info->lm);
970   read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
971   lm_name = LM_NAME (new);
972   do_cleanups (old_chain);
973 
974   return (lm_name >= vaddr && lm_name < vaddr + size);
975 }
976 
977 /*
978 
979   LOCAL FUNCTION
980 
981   open_symbol_file_object
982 
983   SYNOPSIS
984 
985   void open_symbol_file_object (void *from_tty)
986 
987   DESCRIPTION
988 
989   If no open symbol file, attempt to locate and open the main symbol
990   file.  On SVR4 systems, this is the first link map entry.  If its
991   name is here, we can open it.  Useful when attaching to a process
992   without first loading its symbol file.
993 
994   If FROM_TTYP dereferences to a non-zero integer, allow messages to
995   be printed.  This parameter is a pointer rather than an int because
996   open_symbol_file_object() is called via catch_errors() and
997   catch_errors() requires a pointer argument. */
998 
999 static int
1000 open_symbol_file_object (void *from_ttyp)
1001 {
1002   CORE_ADDR lm, l_name;
1003   char *filename;
1004   int errcode;
1005   int from_tty = *(int *)from_ttyp;
1006   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1007   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
1008   int l_name_size = TYPE_LENGTH (ptr_type);
1009   gdb_byte *l_name_buf = xmalloc (l_name_size);
1010   struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1011   struct svr4_info *info = get_svr4_info ();
1012 
1013   if (symfile_objfile)
1014     if (!query (_("Attempt to reload symbols from process? ")))
1015       return 0;
1016 
1017   /* Always locate the debug struct, in case it has moved.  */
1018   info->debug_base = 0;
1019   if (locate_base (info) == 0)
1020     return 0;	/* failed somehow... */
1021 
1022   /* First link map member should be the executable.  */
1023   lm = solib_svr4_r_map (info);
1024   if (lm == 0)
1025     return 0;	/* failed somehow... */
1026 
1027   /* Read address of name from target memory to GDB.  */
1028   read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1029 
1030   /* Convert the address to host format.  */
1031   l_name = extract_typed_address (l_name_buf, ptr_type);
1032 
1033   /* Free l_name_buf.  */
1034   do_cleanups (cleanups);
1035 
1036   if (l_name == 0)
1037     return 0;		/* No filename.  */
1038 
1039   /* Now fetch the filename from target memory.  */
1040   target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1041   make_cleanup (xfree, filename);
1042 
1043   if (errcode)
1044     {
1045       warning (_("failed to read exec filename from attached file: %s"),
1046 	       safe_strerror (errcode));
1047       return 0;
1048     }
1049 
1050   /* Have a pathname: read the symbol file.  */
1051   symbol_file_add_main (filename, from_tty);
1052 
1053   return 1;
1054 }
1055 
1056 /* If no shared library information is available from the dynamic
1057    linker, build a fallback list from other sources.  */
1058 
1059 static struct so_list *
1060 svr4_default_sos (void)
1061 {
1062   struct svr4_info *info = get_svr4_info ();
1063 
1064   struct so_list *head = NULL;
1065   struct so_list **link_ptr = &head;
1066 
1067   if (info->debug_loader_offset_p)
1068     {
1069       struct so_list *new = XZALLOC (struct so_list);
1070 
1071       new->lm_info = xmalloc (sizeof (struct lm_info));
1072 
1073       /* Nothing will ever check the cached copy of the link
1074 	 map if we set l_addr.  */
1075       new->lm_info->l_addr = info->debug_loader_offset;
1076       new->lm_info->lm_addr = 0;
1077       new->lm_info->lm = NULL;
1078 
1079       strncpy (new->so_name, info->debug_loader_name,
1080 	       SO_NAME_MAX_PATH_SIZE - 1);
1081       new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1082       strcpy (new->so_original_name, new->so_name);
1083 
1084       *link_ptr = new;
1085       link_ptr = &new->next;
1086     }
1087 
1088   return head;
1089 }
1090 
1091 /* LOCAL FUNCTION
1092 
1093    current_sos -- build a list of currently loaded shared objects
1094 
1095    SYNOPSIS
1096 
1097    struct so_list *current_sos ()
1098 
1099    DESCRIPTION
1100 
1101    Build a list of `struct so_list' objects describing the shared
1102    objects currently loaded in the inferior.  This list does not
1103    include an entry for the main executable file.
1104 
1105    Note that we only gather information directly available from the
1106    inferior --- we don't examine any of the shared library files
1107    themselves.  The declaration of `struct so_list' says which fields
1108    we provide values for.  */
1109 
1110 static struct so_list *
1111 svr4_current_sos (void)
1112 {
1113   CORE_ADDR lm, prev_lm;
1114   struct so_list *head = 0;
1115   struct so_list **link_ptr = &head;
1116   CORE_ADDR ldsomap = 0;
1117   struct svr4_info *info;
1118 
1119   info = get_svr4_info ();
1120 
1121   /* Always locate the debug struct, in case it has moved.  */
1122   info->debug_base = 0;
1123   locate_base (info);
1124 
1125   /* If we can't find the dynamic linker's base structure, this
1126      must not be a dynamically linked executable.  Hmm.  */
1127   if (! info->debug_base)
1128     return svr4_default_sos ();
1129 
1130   /* Walk the inferior's link map list, and build our list of
1131      `struct so_list' nodes.  */
1132   prev_lm = 0;
1133   lm = solib_svr4_r_map (info);
1134 
1135   while (lm)
1136     {
1137       struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1138       struct so_list *new = XZALLOC (struct so_list);
1139       struct cleanup *old_chain = make_cleanup (xfree, new);
1140       CORE_ADDR next_lm;
1141 
1142       new->lm_info = xmalloc (sizeof (struct lm_info));
1143       make_cleanup (xfree, new->lm_info);
1144 
1145       new->lm_info->l_addr = (CORE_ADDR)-1;
1146       new->lm_info->lm_addr = lm;
1147       new->lm_info->lm = xzalloc (lmo->link_map_size);
1148       make_cleanup (xfree, new->lm_info->lm);
1149 
1150       read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1151 
1152       next_lm = LM_NEXT (new);
1153 
1154       if (LM_PREV (new) != prev_lm)
1155 	{
1156 	  warning (_("Corrupted shared library list"));
1157 	  free_so (new);
1158 	  next_lm = 0;
1159 	}
1160 
1161       /* For SVR4 versions, the first entry in the link map is for the
1162          inferior executable, so we must ignore it.  For some versions of
1163          SVR4, it has no name.  For others (Solaris 2.3 for example), it
1164          does have a name, so we can no longer use a missing name to
1165          decide when to ignore it. */
1166       else if (IGNORE_FIRST_LINK_MAP_ENTRY (new) && ldsomap == 0)
1167 	{
1168 	  info->main_lm_addr = new->lm_info->lm_addr;
1169 	  free_so (new);
1170 	}
1171       else
1172 	{
1173 	  int errcode;
1174 	  char *buffer;
1175 
1176 	  /* Extract this shared object's name.  */
1177 	  target_read_string (LM_NAME (new), &buffer,
1178 			      SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1179 	  if (errcode != 0)
1180 	    warning (_("Can't read pathname for load map: %s."),
1181 		     safe_strerror (errcode));
1182 	  else
1183 	    {
1184 	      strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1185 	      new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1186 	      strcpy (new->so_original_name, new->so_name);
1187 	    }
1188 	  xfree (buffer);
1189 
1190 	  /* If this entry has no name, or its name matches the name
1191 	     for the main executable, don't include it in the list.  */
1192 	  if (! new->so_name[0]
1193 	      || match_main (new->so_name))
1194 	    free_so (new);
1195 	  else
1196 	    {
1197 	      new->next = 0;
1198 	      *link_ptr = new;
1199 	      link_ptr = &new->next;
1200 	    }
1201 	}
1202 
1203       prev_lm = lm;
1204       lm = next_lm;
1205 
1206       /* On Solaris, the dynamic linker is not in the normal list of
1207 	 shared objects, so make sure we pick it up too.  Having
1208 	 symbol information for the dynamic linker is quite crucial
1209 	 for skipping dynamic linker resolver code.  */
1210       if (lm == 0 && ldsomap == 0)
1211 	{
1212 	  lm = ldsomap = solib_svr4_r_ldsomap (info);
1213 	  prev_lm = 0;
1214 	}
1215 
1216       discard_cleanups (old_chain);
1217     }
1218 
1219   if (head == NULL)
1220     return svr4_default_sos ();
1221 
1222   return head;
1223 }
1224 
1225 /* Get the address of the link_map for a given OBJFILE.  */
1226 
1227 CORE_ADDR
1228 svr4_fetch_objfile_link_map (struct objfile *objfile)
1229 {
1230   struct so_list *so;
1231   struct svr4_info *info = get_svr4_info ();
1232 
1233   /* Cause svr4_current_sos() to be run if it hasn't been already.  */
1234   if (info->main_lm_addr == 0)
1235     solib_add (NULL, 0, &current_target, auto_solib_add);
1236 
1237   /* svr4_current_sos() will set main_lm_addr for the main executable.  */
1238   if (objfile == symfile_objfile)
1239     return info->main_lm_addr;
1240 
1241   /* The other link map addresses may be found by examining the list
1242      of shared libraries.  */
1243   for (so = master_so_list (); so; so = so->next)
1244     if (so->objfile == objfile)
1245       return so->lm_info->lm_addr;
1246 
1247   /* Not found!  */
1248   return 0;
1249 }
1250 
1251 /* On some systems, the only way to recognize the link map entry for
1252    the main executable file is by looking at its name.  Return
1253    non-zero iff SONAME matches one of the known main executable names.  */
1254 
1255 static int
1256 match_main (char *soname)
1257 {
1258   char **mainp;
1259 
1260   for (mainp = main_name_list; *mainp != NULL; mainp++)
1261     {
1262       if (strcmp (soname, *mainp) == 0)
1263 	return (1);
1264     }
1265 
1266   return (0);
1267 }
1268 
1269 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1270    SVR4 run time loader.  */
1271 
1272 int
1273 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1274 {
1275   struct svr4_info *info = get_svr4_info ();
1276 
1277   return ((pc >= info->interp_text_sect_low
1278 	   && pc < info->interp_text_sect_high)
1279 	  || (pc >= info->interp_plt_sect_low
1280 	      && pc < info->interp_plt_sect_high)
1281 	  || in_plt_section (pc, NULL));
1282 }
1283 
1284 /* Given an executable's ABFD and target, compute the entry-point
1285    address.  */
1286 
1287 static CORE_ADDR
1288 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1289 {
1290   /* KevinB wrote ... for most targets, the address returned by
1291      bfd_get_start_address() is the entry point for the start
1292      function.  But, for some targets, bfd_get_start_address() returns
1293      the address of a function descriptor from which the entry point
1294      address may be extracted.  This address is extracted by
1295      gdbarch_convert_from_func_ptr_addr().  The method
1296      gdbarch_convert_from_func_ptr_addr() is the merely the identify
1297      function for targets which don't use function descriptors.  */
1298   return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1299 					     bfd_get_start_address (abfd),
1300 					     targ);
1301 }
1302 
1303 /*
1304 
1305    LOCAL FUNCTION
1306 
1307    enable_break -- arrange for dynamic linker to hit breakpoint
1308 
1309    SYNOPSIS
1310 
1311    int enable_break (void)
1312 
1313    DESCRIPTION
1314 
1315    Both the SunOS and the SVR4 dynamic linkers have, as part of their
1316    debugger interface, support for arranging for the inferior to hit
1317    a breakpoint after mapping in the shared libraries.  This function
1318    enables that breakpoint.
1319 
1320    For SunOS, there is a special flag location (in_debugger) which we
1321    set to 1.  When the dynamic linker sees this flag set, it will set
1322    a breakpoint at a location known only to itself, after saving the
1323    original contents of that place and the breakpoint address itself,
1324    in it's own internal structures.  When we resume the inferior, it
1325    will eventually take a SIGTRAP when it runs into the breakpoint.
1326    We handle this (in a different place) by restoring the contents of
1327    the breakpointed location (which is only known after it stops),
1328    chasing around to locate the shared libraries that have been
1329    loaded, then resuming.
1330 
1331    For SVR4, the debugger interface structure contains a member (r_brk)
1332    which is statically initialized at the time the shared library is
1333    built, to the offset of a function (_r_debug_state) which is guaran-
1334    teed to be called once before mapping in a library, and again when
1335    the mapping is complete.  At the time we are examining this member,
1336    it contains only the unrelocated offset of the function, so we have
1337    to do our own relocation.  Later, when the dynamic linker actually
1338    runs, it relocates r_brk to be the actual address of _r_debug_state().
1339 
1340    The debugger interface structure also contains an enumeration which
1341    is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1342    depending upon whether or not the library is being mapped or unmapped,
1343    and then set to RT_CONSISTENT after the library is mapped/unmapped.
1344  */
1345 
1346 static int
1347 enable_break (struct svr4_info *info, int from_tty)
1348 {
1349   struct minimal_symbol *msymbol;
1350   char **bkpt_namep;
1351   asection *interp_sect;
1352   gdb_byte *interp_name;
1353   CORE_ADDR sym_addr;
1354 
1355   info->interp_text_sect_low = info->interp_text_sect_high = 0;
1356   info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1357 
1358   /* If we already have a shared library list in the target, and
1359      r_debug contains r_brk, set the breakpoint there - this should
1360      mean r_brk has already been relocated.  Assume the dynamic linker
1361      is the object containing r_brk.  */
1362 
1363   solib_add (NULL, from_tty, &current_target, auto_solib_add);
1364   sym_addr = 0;
1365   if (info->debug_base && solib_svr4_r_map (info) != 0)
1366     sym_addr = solib_svr4_r_brk (info);
1367 
1368   if (sym_addr != 0)
1369     {
1370       struct obj_section *os;
1371 
1372       sym_addr = gdbarch_addr_bits_remove
1373 	(target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1374 							      sym_addr,
1375 							      &current_target));
1376 
1377       /* On at least some versions of Solaris there's a dynamic relocation
1378 	 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1379 	 we get control before the dynamic linker has self-relocated.
1380 	 Check if SYM_ADDR is in a known section, if it is assume we can
1381 	 trust its value.  This is just a heuristic though, it could go away
1382 	 or be replaced if it's getting in the way.
1383 
1384 	 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1385 	 however it's spelled in your particular system) is ARM or Thumb.
1386 	 That knowledge is encoded in the address, if it's Thumb the low bit
1387 	 is 1.  However, we've stripped that info above and it's not clear
1388 	 what all the consequences are of passing a non-addr_bits_remove'd
1389 	 address to create_solib_event_breakpoint.  The call to
1390 	 find_pc_section verifies we know about the address and have some
1391 	 hope of computing the right kind of breakpoint to use (via
1392 	 symbol info).  It does mean that GDB needs to be pointed at a
1393 	 non-stripped version of the dynamic linker in order to obtain
1394 	 information it already knows about.  Sigh.  */
1395 
1396       os = find_pc_section (sym_addr);
1397       if (os != NULL)
1398 	{
1399 	  /* Record the relocated start and end address of the dynamic linker
1400 	     text and plt section for svr4_in_dynsym_resolve_code.  */
1401 	  bfd *tmp_bfd;
1402 	  CORE_ADDR load_addr;
1403 
1404 	  tmp_bfd = os->objfile->obfd;
1405 	  load_addr = ANOFFSET (os->objfile->section_offsets,
1406 				os->objfile->sect_index_text);
1407 
1408 	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1409 	  if (interp_sect)
1410 	    {
1411 	      info->interp_text_sect_low =
1412 		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1413 	      info->interp_text_sect_high =
1414 		info->interp_text_sect_low
1415 		+ bfd_section_size (tmp_bfd, interp_sect);
1416 	    }
1417 	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1418 	  if (interp_sect)
1419 	    {
1420 	      info->interp_plt_sect_low =
1421 		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1422 	      info->interp_plt_sect_high =
1423 		info->interp_plt_sect_low
1424 		+ bfd_section_size (tmp_bfd, interp_sect);
1425 	    }
1426 
1427 	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1428 	  return 1;
1429 	}
1430     }
1431 
1432   /* Find the program interpreter; if not found, warn the user and drop
1433      into the old breakpoint at symbol code.  */
1434   interp_name = find_program_interpreter ();
1435   if (interp_name)
1436     {
1437       CORE_ADDR load_addr = 0;
1438       int load_addr_found = 0;
1439       int loader_found_in_list = 0;
1440       struct so_list *so;
1441       bfd *tmp_bfd = NULL;
1442       struct target_ops *tmp_bfd_target;
1443       volatile struct gdb_exception ex;
1444 
1445       sym_addr = 0;
1446 
1447       /* Now we need to figure out where the dynamic linker was
1448          loaded so that we can load its symbols and place a breakpoint
1449          in the dynamic linker itself.
1450 
1451          This address is stored on the stack.  However, I've been unable
1452          to find any magic formula to find it for Solaris (appears to
1453          be trivial on GNU/Linux).  Therefore, we have to try an alternate
1454          mechanism to find the dynamic linker's base address.  */
1455 
1456       TRY_CATCH (ex, RETURN_MASK_ALL)
1457         {
1458 	  tmp_bfd = solib_bfd_open (interp_name);
1459 	}
1460       if (tmp_bfd == NULL)
1461 	goto bkpt_at_symbol;
1462 
1463       /* Now convert the TMP_BFD into a target.  That way target, as
1464          well as BFD operations can be used.  Note that closing the
1465          target will also close the underlying bfd.  */
1466       tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1467 
1468       /* On a running target, we can get the dynamic linker's base
1469          address from the shared library table.  */
1470       so = master_so_list ();
1471       while (so)
1472 	{
1473 	  if (svr4_same_1 (interp_name, so->so_original_name))
1474 	    {
1475 	      load_addr_found = 1;
1476 	      loader_found_in_list = 1;
1477 	      load_addr = LM_ADDR_CHECK (so, tmp_bfd);
1478 	      break;
1479 	    }
1480 	  so = so->next;
1481 	}
1482 
1483       /* If we were not able to find the base address of the loader
1484          from our so_list, then try using the AT_BASE auxilliary entry.  */
1485       if (!load_addr_found)
1486         if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1487 	  {
1488 	    int addr_bit = gdbarch_addr_bit (target_gdbarch);
1489 
1490 	    /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1491 	       that `+ load_addr' will overflow CORE_ADDR width not creating
1492 	       invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1493 	       GDB.  */
1494 
1495 	    if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1496 	      {
1497 		CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1498 		CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1499 							      tmp_bfd_target);
1500 
1501 		gdb_assert (load_addr < space_size);
1502 
1503 		/* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1504 		   64bit ld.so with 32bit executable, it should not happen.  */
1505 
1506 		if (tmp_entry_point < space_size
1507 		    && tmp_entry_point + load_addr >= space_size)
1508 		  load_addr -= space_size;
1509 	      }
1510 
1511 	    load_addr_found = 1;
1512 	  }
1513 
1514       /* Otherwise we find the dynamic linker's base address by examining
1515 	 the current pc (which should point at the entry point for the
1516 	 dynamic linker) and subtracting the offset of the entry point.
1517 
1518          This is more fragile than the previous approaches, but is a good
1519          fallback method because it has actually been working well in
1520          most cases.  */
1521       if (!load_addr_found)
1522 	{
1523 	  struct regcache *regcache
1524 	    = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1525 
1526 	  load_addr = (regcache_read_pc (regcache)
1527 		       - exec_entry_point (tmp_bfd, tmp_bfd_target));
1528 	}
1529 
1530       if (!loader_found_in_list)
1531 	{
1532 	  info->debug_loader_name = xstrdup (interp_name);
1533 	  info->debug_loader_offset_p = 1;
1534 	  info->debug_loader_offset = load_addr;
1535 	  solib_add (NULL, from_tty, &current_target, auto_solib_add);
1536 	}
1537 
1538       /* Record the relocated start and end address of the dynamic linker
1539          text and plt section for svr4_in_dynsym_resolve_code.  */
1540       interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1541       if (interp_sect)
1542 	{
1543 	  info->interp_text_sect_low =
1544 	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1545 	  info->interp_text_sect_high =
1546 	    info->interp_text_sect_low
1547 	    + bfd_section_size (tmp_bfd, interp_sect);
1548 	}
1549       interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1550       if (interp_sect)
1551 	{
1552 	  info->interp_plt_sect_low =
1553 	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1554 	  info->interp_plt_sect_high =
1555 	    info->interp_plt_sect_low
1556 	    + bfd_section_size (tmp_bfd, interp_sect);
1557 	}
1558 
1559       /* Now try to set a breakpoint in the dynamic linker.  */
1560       for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1561 	{
1562 	  sym_addr = bfd_lookup_symbol (tmp_bfd, *bkpt_namep);
1563 	  if (sym_addr != 0)
1564 	    break;
1565 	}
1566 
1567       if (sym_addr != 0)
1568 	/* Convert 'sym_addr' from a function pointer to an address.
1569 	   Because we pass tmp_bfd_target instead of the current
1570 	   target, this will always produce an unrelocated value.  */
1571 	sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1572 						       sym_addr,
1573 						       tmp_bfd_target);
1574 
1575       /* We're done with both the temporary bfd and target.  Remember,
1576          closing the target closes the underlying bfd.  */
1577       target_close (tmp_bfd_target, 0);
1578 
1579       if (sym_addr != 0)
1580 	{
1581 	  create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1582 	  xfree (interp_name);
1583 	  return 1;
1584 	}
1585 
1586       /* For whatever reason we couldn't set a breakpoint in the dynamic
1587          linker.  Warn and drop into the old code.  */
1588     bkpt_at_symbol:
1589       xfree (interp_name);
1590       warning (_("Unable to find dynamic linker breakpoint function.\n"
1591                "GDB will be unable to debug shared library initializers\n"
1592                "and track explicitly loaded dynamic code."));
1593     }
1594 
1595   /* Scan through the lists of symbols, trying to look up the symbol and
1596      set a breakpoint there.  Terminate loop when we/if we succeed.  */
1597 
1598   for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1599     {
1600       msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1601       if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1602 	{
1603 	  sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1604 	  sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1605 							 sym_addr,
1606 							 &current_target);
1607 	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1608 	  return 1;
1609 	}
1610     }
1611 
1612   for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1613     {
1614       msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1615       if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1616 	{
1617 	  sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1618 	  sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1619 							 sym_addr,
1620 							 &current_target);
1621 	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1622 	  return 1;
1623 	}
1624     }
1625   return 0;
1626 }
1627 
1628 /*
1629 
1630    LOCAL FUNCTION
1631 
1632    special_symbol_handling -- additional shared library symbol handling
1633 
1634    SYNOPSIS
1635 
1636    void special_symbol_handling ()
1637 
1638    DESCRIPTION
1639 
1640    Once the symbols from a shared object have been loaded in the usual
1641    way, we are called to do any system specific symbol handling that
1642    is needed.
1643 
1644    For SunOS4, this consisted of grunging around in the dynamic
1645    linkers structures to find symbol definitions for "common" symbols
1646    and adding them to the minimal symbol table for the runtime common
1647    objfile.
1648 
1649    However, for SVR4, there's nothing to do.
1650 
1651  */
1652 
1653 static void
1654 svr4_special_symbol_handling (void)
1655 {
1656 }
1657 
1658 /* Read the ELF program headers from ABFD.  Return the contents and
1659    set *PHDRS_SIZE to the size of the program headers.  */
1660 
1661 static gdb_byte *
1662 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1663 {
1664   Elf_Internal_Ehdr *ehdr;
1665   gdb_byte *buf;
1666 
1667   ehdr = elf_elfheader (abfd);
1668 
1669   *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1670   if (*phdrs_size == 0)
1671     return NULL;
1672 
1673   buf = xmalloc (*phdrs_size);
1674   if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1675       || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1676     {
1677       xfree (buf);
1678       return NULL;
1679     }
1680 
1681   return buf;
1682 }
1683 
1684 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1685    exec_bfd.  Otherwise return 0.
1686 
1687    We relocate all of the sections by the same amount.  This
1688    behavior is mandated by recent editions of the System V ABI.
1689    According to the System V Application Binary Interface,
1690    Edition 4.1, page 5-5:
1691 
1692      ...  Though the system chooses virtual addresses for
1693      individual processes, it maintains the segments' relative
1694      positions.  Because position-independent code uses relative
1695      addressesing between segments, the difference between
1696      virtual addresses in memory must match the difference
1697      between virtual addresses in the file.  The difference
1698      between the virtual address of any segment in memory and
1699      the corresponding virtual address in the file is thus a
1700      single constant value for any one executable or shared
1701      object in a given process.  This difference is the base
1702      address.  One use of the base address is to relocate the
1703      memory image of the program during dynamic linking.
1704 
1705    The same language also appears in Edition 4.0 of the System V
1706    ABI and is left unspecified in some of the earlier editions.
1707 
1708    Decide if the objfile needs to be relocated.  As indicated above, we will
1709    only be here when execution is stopped.  But during attachment PC can be at
1710    arbitrary address therefore regcache_read_pc can be misleading (contrary to
1711    the auxv AT_ENTRY value).  Moreover for executable with interpreter section
1712    regcache_read_pc would point to the interpreter and not the main executable.
1713 
1714    So, to summarize, relocations are necessary when the start address obtained
1715    from the executable is different from the address in auxv AT_ENTRY entry.
1716 
1717    [ The astute reader will note that we also test to make sure that
1718      the executable in question has the DYNAMIC flag set.  It is my
1719      opinion that this test is unnecessary (undesirable even).  It
1720      was added to avoid inadvertent relocation of an executable
1721      whose e_type member in the ELF header is not ET_DYN.  There may
1722      be a time in the future when it is desirable to do relocations
1723      on other types of files as well in which case this condition
1724      should either be removed or modified to accomodate the new file
1725      type.  - Kevin, Nov 2000. ]  */
1726 
1727 static int
1728 svr4_exec_displacement (CORE_ADDR *displacementp)
1729 {
1730   /* ENTRY_POINT is a possible function descriptor - before
1731      a call to gdbarch_convert_from_func_ptr_addr.  */
1732   CORE_ADDR entry_point, displacement;
1733 
1734   if (exec_bfd == NULL)
1735     return 0;
1736 
1737   /* Therefore for ELF it is ET_EXEC and not ET_DYN.  Both shared libraries
1738      being executed themselves and PIE (Position Independent Executable)
1739      executables are ET_DYN.  */
1740 
1741   if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1742     return 0;
1743 
1744   if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1745     return 0;
1746 
1747   displacement = entry_point - bfd_get_start_address (exec_bfd);
1748 
1749   /* Verify the DISPLACEMENT candidate complies with the required page
1750      alignment.  It is cheaper than the program headers comparison below.  */
1751 
1752   if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1753     {
1754       const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1755 
1756       /* p_align of PT_LOAD segments does not specify any alignment but
1757 	 only congruency of addresses:
1758 	   p_offset % p_align == p_vaddr % p_align
1759 	 Kernel is free to load the executable with lower alignment.  */
1760 
1761       if ((displacement & (elf->minpagesize - 1)) != 0)
1762 	return 0;
1763     }
1764 
1765   /* Verify that the auxilliary vector describes the same file as exec_bfd, by
1766      comparing their program headers.  If the program headers in the auxilliary
1767      vector do not match the program headers in the executable, then we are
1768      looking at a different file than the one used by the kernel - for
1769      instance, "gdb program" connected to "gdbserver :PORT ld.so program".  */
1770 
1771   if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1772     {
1773       /* Be optimistic and clear OK only if GDB was able to verify the headers
1774 	 really do not match.  */
1775       int phdrs_size, phdrs2_size, ok = 1;
1776       gdb_byte *buf, *buf2;
1777       int arch_size;
1778 
1779       buf = read_program_header (-1, &phdrs_size, &arch_size);
1780       buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1781       if (buf != NULL && buf2 != NULL)
1782 	{
1783 	  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
1784 
1785 	  /* We are dealing with three different addresses.  EXEC_BFD
1786 	     represents current address in on-disk file.  target memory content
1787 	     may be different from EXEC_BFD as the file may have been prelinked
1788 	     to a different address after the executable has been loaded.
1789 	     Moreover the address of placement in target memory can be
1790 	     different from what the program headers in target memory say - this
1791 	     is the goal of PIE.
1792 
1793 	     Detected DISPLACEMENT covers both the offsets of PIE placement and
1794 	     possible new prelink performed after start of the program.  Here
1795 	     relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1796 	     content offset for the verification purpose.  */
1797 
1798 	  if (phdrs_size != phdrs2_size
1799 	      || bfd_get_arch_size (exec_bfd) != arch_size)
1800 	    ok = 0;
1801 	  else if (arch_size == 32 && phdrs_size >= sizeof (Elf32_External_Phdr)
1802 	           && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1803 	    {
1804 	      Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1805 	      Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1806 	      CORE_ADDR displacement = 0;
1807 	      int i;
1808 
1809 	      /* DISPLACEMENT could be found more easily by the difference of
1810 		 ehdr2->e_entry.  But we haven't read the ehdr yet, and we
1811 		 already have enough information to compute that displacement
1812 		 with what we've read.  */
1813 
1814 	      for (i = 0; i < ehdr2->e_phnum; i++)
1815 		if (phdr2[i].p_type == PT_LOAD)
1816 		  {
1817 		    Elf32_External_Phdr *phdrp;
1818 		    gdb_byte *buf_vaddr_p, *buf_paddr_p;
1819 		    CORE_ADDR vaddr, paddr;
1820 		    CORE_ADDR displacement_vaddr = 0;
1821 		    CORE_ADDR displacement_paddr = 0;
1822 
1823 		    phdrp = &((Elf32_External_Phdr *) buf)[i];
1824 		    buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1825 		    buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1826 
1827 		    vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1828 						      byte_order);
1829 		    displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1830 
1831 		    paddr = extract_unsigned_integer (buf_paddr_p, 4,
1832 						      byte_order);
1833 		    displacement_paddr = paddr - phdr2[i].p_paddr;
1834 
1835 		    if (displacement_vaddr == displacement_paddr)
1836 		      displacement = displacement_vaddr;
1837 
1838 		    break;
1839 		  }
1840 
1841 	      /* Now compare BUF and BUF2 with optional DISPLACEMENT.  */
1842 
1843 	      for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1844 		{
1845 		  Elf32_External_Phdr *phdrp;
1846 		  Elf32_External_Phdr *phdr2p;
1847 		  gdb_byte *buf_vaddr_p, *buf_paddr_p;
1848 		  CORE_ADDR vaddr, paddr;
1849 
1850 		  phdrp = &((Elf32_External_Phdr *) buf)[i];
1851 		  buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1852 		  buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1853 		  phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1854 
1855 		  /* PT_GNU_STACK is an exception by being never relocated by
1856 		     prelink as its addresses are always zero.  */
1857 
1858 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1859 		    continue;
1860 
1861 		  /* Check also other adjustment combinations - PR 11786.  */
1862 
1863 		  vaddr = extract_unsigned_integer (buf_vaddr_p, 4, byte_order);
1864 		  vaddr -= displacement;
1865 		  store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1866 
1867 		  paddr = extract_unsigned_integer (buf_paddr_p, 4, byte_order);
1868 		  paddr -= displacement;
1869 		  store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1870 
1871 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1872 		    continue;
1873 
1874 		  ok = 0;
1875 		  break;
1876 		}
1877 	    }
1878 	  else if (arch_size == 64 && phdrs_size >= sizeof (Elf64_External_Phdr)
1879 	           && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1880 	    {
1881 	      Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1882 	      Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1883 	      CORE_ADDR displacement = 0;
1884 	      int i;
1885 
1886 	      /* DISPLACEMENT could be found more easily by the difference of
1887 		 ehdr2->e_entry.  But we haven't read the ehdr yet, and we
1888 		 already have enough information to compute that displacement
1889 		 with what we've read.  */
1890 
1891 	      for (i = 0; i < ehdr2->e_phnum; i++)
1892 		if (phdr2[i].p_type == PT_LOAD)
1893 		  {
1894 		    Elf64_External_Phdr *phdrp;
1895 		    gdb_byte *buf_vaddr_p, *buf_paddr_p;
1896 		    CORE_ADDR vaddr, paddr;
1897 		    CORE_ADDR displacement_vaddr = 0;
1898 		    CORE_ADDR displacement_paddr = 0;
1899 
1900 		    phdrp = &((Elf64_External_Phdr *) buf)[i];
1901 		    buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1902 		    buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1903 
1904 		    vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1905 						      byte_order);
1906 		    displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1907 
1908 		    paddr = extract_unsigned_integer (buf_paddr_p, 8,
1909 						      byte_order);
1910 		    displacement_paddr = paddr - phdr2[i].p_paddr;
1911 
1912 		    if (displacement_vaddr == displacement_paddr)
1913 		      displacement = displacement_vaddr;
1914 
1915 		    break;
1916 		  }
1917 
1918 	      /* Now compare BUF and BUF2 with optional DISPLACEMENT.  */
1919 
1920 	      for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
1921 		{
1922 		  Elf64_External_Phdr *phdrp;
1923 		  Elf64_External_Phdr *phdr2p;
1924 		  gdb_byte *buf_vaddr_p, *buf_paddr_p;
1925 		  CORE_ADDR vaddr, paddr;
1926 
1927 		  phdrp = &((Elf64_External_Phdr *) buf)[i];
1928 		  buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1929 		  buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1930 		  phdr2p = &((Elf64_External_Phdr *) buf2)[i];
1931 
1932 		  /* PT_GNU_STACK is an exception by being never relocated by
1933 		     prelink as its addresses are always zero.  */
1934 
1935 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1936 		    continue;
1937 
1938 		  /* Check also other adjustment combinations - PR 11786.  */
1939 
1940 		  vaddr = extract_unsigned_integer (buf_vaddr_p, 8, byte_order);
1941 		  vaddr -= displacement;
1942 		  store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
1943 
1944 		  paddr = extract_unsigned_integer (buf_paddr_p, 8, byte_order);
1945 		  paddr -= displacement;
1946 		  store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
1947 
1948 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1949 		    continue;
1950 
1951 		  ok = 0;
1952 		  break;
1953 		}
1954 	    }
1955 	  else
1956 	    ok = 0;
1957 	}
1958 
1959       xfree (buf);
1960       xfree (buf2);
1961 
1962       if (!ok)
1963 	return 0;
1964     }
1965 
1966   if (info_verbose)
1967     {
1968       /* It can be printed repeatedly as there is no easy way to check
1969 	 the executable symbols/file has been already relocated to
1970 	 displacement.  */
1971 
1972       printf_unfiltered (_("Using PIE (Position Independent Executable) "
1973 			   "displacement %s for \"%s\".\n"),
1974 			 paddress (target_gdbarch, displacement),
1975 			 bfd_get_filename (exec_bfd));
1976     }
1977 
1978   *displacementp = displacement;
1979   return 1;
1980 }
1981 
1982 /* Relocate the main executable.  This function should be called upon
1983    stopping the inferior process at the entry point to the program.
1984    The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
1985    different, the main executable is relocated by the proper amount.  */
1986 
1987 static void
1988 svr4_relocate_main_executable (void)
1989 {
1990   CORE_ADDR displacement;
1991 
1992   /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
1993      probably contains the offsets computed using the PIE displacement
1994      from the previous run, which of course are irrelevant for this run.
1995      So we need to determine the new PIE displacement and recompute the
1996      section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
1997      already contains pre-computed offsets.
1998 
1999      If we cannot compute the PIE displacement, either:
2000 
2001        - The executable is not PIE.
2002 
2003        - SYMFILE_OBJFILE does not match the executable started in the target.
2004 	 This can happen for main executable symbols loaded at the host while
2005 	 `ld.so --ld-args main-executable' is loaded in the target.
2006 
2007      Then we leave the section offsets untouched and use them as is for
2008      this run.  Either:
2009 
2010        - These section offsets were properly reset earlier, and thus
2011 	 already contain the correct values.  This can happen for instance
2012 	 when reconnecting via the remote protocol to a target that supports
2013 	 the `qOffsets' packet.
2014 
2015        - The section offsets were not reset earlier, and the best we can
2016 	 hope is that the old offsets are still applicable to the new run.
2017    */
2018 
2019   if (! svr4_exec_displacement (&displacement))
2020     return;
2021 
2022   /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2023      addresses.  */
2024 
2025   if (symfile_objfile)
2026     {
2027       struct section_offsets *new_offsets;
2028       int i;
2029 
2030       new_offsets = alloca (symfile_objfile->num_sections
2031 			    * sizeof (*new_offsets));
2032 
2033       for (i = 0; i < symfile_objfile->num_sections; i++)
2034 	new_offsets->offsets[i] = displacement;
2035 
2036       objfile_relocate (symfile_objfile, new_offsets);
2037     }
2038   else if (exec_bfd)
2039     {
2040       asection *asect;
2041 
2042       for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2043 	exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2044 				  (bfd_section_vma (exec_bfd, asect)
2045 				   + displacement));
2046     }
2047 }
2048 
2049 /*
2050 
2051    GLOBAL FUNCTION
2052 
2053    svr4_solib_create_inferior_hook -- shared library startup support
2054 
2055    SYNOPSIS
2056 
2057    void svr4_solib_create_inferior_hook (int from_tty)
2058 
2059    DESCRIPTION
2060 
2061    When gdb starts up the inferior, it nurses it along (through the
2062    shell) until it is ready to execute it's first instruction.  At this
2063    point, this function gets called via expansion of the macro
2064    SOLIB_CREATE_INFERIOR_HOOK.
2065 
2066    For SunOS executables, this first instruction is typically the
2067    one at "_start", or a similar text label, regardless of whether
2068    the executable is statically or dynamically linked.  The runtime
2069    startup code takes care of dynamically linking in any shared
2070    libraries, once gdb allows the inferior to continue.
2071 
2072    For SVR4 executables, this first instruction is either the first
2073    instruction in the dynamic linker (for dynamically linked
2074    executables) or the instruction at "start" for statically linked
2075    executables.  For dynamically linked executables, the system
2076    first exec's /lib/libc.so.N, which contains the dynamic linker,
2077    and starts it running.  The dynamic linker maps in any needed
2078    shared libraries, maps in the actual user executable, and then
2079    jumps to "start" in the user executable.
2080 
2081    For both SunOS shared libraries, and SVR4 shared libraries, we
2082    can arrange to cooperate with the dynamic linker to discover the
2083    names of shared libraries that are dynamically linked, and the
2084    base addresses to which they are linked.
2085 
2086    This function is responsible for discovering those names and
2087    addresses, and saving sufficient information about them to allow
2088    their symbols to be read at a later time.
2089 
2090    FIXME
2091 
2092    Between enable_break() and disable_break(), this code does not
2093    properly handle hitting breakpoints which the user might have
2094    set in the startup code or in the dynamic linker itself.  Proper
2095    handling will probably have to wait until the implementation is
2096    changed to use the "breakpoint handler function" method.
2097 
2098    Also, what if child has exit()ed?  Must exit loop somehow.
2099  */
2100 
2101 static void
2102 svr4_solib_create_inferior_hook (int from_tty)
2103 {
2104 #if defined(_SCO_DS)
2105   struct inferior *inf;
2106   struct thread_info *tp;
2107 #endif /* defined(_SCO_DS) */
2108   struct svr4_info *info;
2109 
2110   info = get_svr4_info ();
2111 
2112   /* Relocate the main executable if necessary.  */
2113   svr4_relocate_main_executable ();
2114 
2115   if (!svr4_have_link_map_offsets ())
2116     return;
2117 
2118   if (!enable_break (info, from_tty))
2119     return;
2120 
2121 #if defined(_SCO_DS)
2122   /* SCO needs the loop below, other systems should be using the
2123      special shared library breakpoints and the shared library breakpoint
2124      service routine.
2125 
2126      Now run the target.  It will eventually hit the breakpoint, at
2127      which point all of the libraries will have been mapped in and we
2128      can go groveling around in the dynamic linker structures to find
2129      out what we need to know about them. */
2130 
2131   inf = current_inferior ();
2132   tp = inferior_thread ();
2133 
2134   clear_proceed_status ();
2135   inf->stop_soon = STOP_QUIETLY;
2136   tp->stop_signal = TARGET_SIGNAL_0;
2137   do
2138     {
2139       target_resume (pid_to_ptid (-1), 0, tp->stop_signal);
2140       wait_for_inferior (0);
2141     }
2142   while (tp->stop_signal != TARGET_SIGNAL_TRAP);
2143   inf->stop_soon = NO_STOP_QUIETLY;
2144 #endif /* defined(_SCO_DS) */
2145 }
2146 
2147 static void
2148 svr4_clear_solib (void)
2149 {
2150   struct svr4_info *info;
2151 
2152   info = get_svr4_info ();
2153   info->debug_base = 0;
2154   info->debug_loader_offset_p = 0;
2155   info->debug_loader_offset = 0;
2156   xfree (info->debug_loader_name);
2157   info->debug_loader_name = NULL;
2158 }
2159 
2160 static void
2161 svr4_free_so (struct so_list *so)
2162 {
2163   xfree (so->lm_info->lm);
2164   xfree (so->lm_info);
2165 }
2166 
2167 
2168 /* Clear any bits of ADDR that wouldn't fit in a target-format
2169    data pointer.  "Data pointer" here refers to whatever sort of
2170    address the dynamic linker uses to manage its sections.  At the
2171    moment, we don't support shared libraries on any processors where
2172    code and data pointers are different sizes.
2173 
2174    This isn't really the right solution.  What we really need here is
2175    a way to do arithmetic on CORE_ADDR values that respects the
2176    natural pointer/address correspondence.  (For example, on the MIPS,
2177    converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2178    sign-extend the value.  There, simply truncating the bits above
2179    gdbarch_ptr_bit, as we do below, is no good.)  This should probably
2180    be a new gdbarch method or something.  */
2181 static CORE_ADDR
2182 svr4_truncate_ptr (CORE_ADDR addr)
2183 {
2184   if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
2185     /* We don't need to truncate anything, and the bit twiddling below
2186        will fail due to overflow problems.  */
2187     return addr;
2188   else
2189     return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
2190 }
2191 
2192 
2193 static void
2194 svr4_relocate_section_addresses (struct so_list *so,
2195                                  struct target_section *sec)
2196 {
2197   sec->addr    = svr4_truncate_ptr (sec->addr    + LM_ADDR_CHECK (so,
2198 								  sec->bfd));
2199   sec->endaddr = svr4_truncate_ptr (sec->endaddr + LM_ADDR_CHECK (so,
2200 								  sec->bfd));
2201 }
2202 
2203 
2204 /* Architecture-specific operations.  */
2205 
2206 /* Per-architecture data key.  */
2207 static struct gdbarch_data *solib_svr4_data;
2208 
2209 struct solib_svr4_ops
2210 {
2211   /* Return a description of the layout of `struct link_map'.  */
2212   struct link_map_offsets *(*fetch_link_map_offsets)(void);
2213 };
2214 
2215 /* Return a default for the architecture-specific operations.  */
2216 
2217 static void *
2218 solib_svr4_init (struct obstack *obstack)
2219 {
2220   struct solib_svr4_ops *ops;
2221 
2222   ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2223   ops->fetch_link_map_offsets = NULL;
2224   return ops;
2225 }
2226 
2227 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2228    GDBARCH to FLMO.  Also, install SVR4 solib_ops into GDBARCH.  */
2229 
2230 void
2231 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2232                                        struct link_map_offsets *(*flmo) (void))
2233 {
2234   struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2235 
2236   ops->fetch_link_map_offsets = flmo;
2237 
2238   set_solib_ops (gdbarch, &svr4_so_ops);
2239 }
2240 
2241 /* Fetch a link_map_offsets structure using the architecture-specific
2242    `struct link_map_offsets' fetcher.  */
2243 
2244 static struct link_map_offsets *
2245 svr4_fetch_link_map_offsets (void)
2246 {
2247   struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2248 
2249   gdb_assert (ops->fetch_link_map_offsets);
2250   return ops->fetch_link_map_offsets ();
2251 }
2252 
2253 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise.  */
2254 
2255 static int
2256 svr4_have_link_map_offsets (void)
2257 {
2258   struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2259 
2260   return (ops->fetch_link_map_offsets != NULL);
2261 }
2262 
2263 
2264 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
2265    `struct r_debug' and a `struct link_map' that are binary compatible
2266    with the origional SVR4 implementation.  */
2267 
2268 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2269    for an ILP32 SVR4 system.  */
2270 
2271 struct link_map_offsets *
2272 svr4_ilp32_fetch_link_map_offsets (void)
2273 {
2274   static struct link_map_offsets lmo;
2275   static struct link_map_offsets *lmp = NULL;
2276 
2277   if (lmp == NULL)
2278     {
2279       lmp = &lmo;
2280 
2281       lmo.r_version_offset = 0;
2282       lmo.r_version_size = 4;
2283       lmo.r_map_offset = 4;
2284       lmo.r_brk_offset = 8;
2285       lmo.r_ldsomap_offset = 20;
2286 
2287       /* Everything we need is in the first 20 bytes.  */
2288       lmo.link_map_size = 20;
2289       lmo.l_addr_offset = 0;
2290       lmo.l_name_offset = 4;
2291       lmo.l_ld_offset = 8;
2292       lmo.l_next_offset = 12;
2293       lmo.l_prev_offset = 16;
2294     }
2295 
2296   return lmp;
2297 }
2298 
2299 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2300    for an LP64 SVR4 system.  */
2301 
2302 struct link_map_offsets *
2303 svr4_lp64_fetch_link_map_offsets (void)
2304 {
2305   static struct link_map_offsets lmo;
2306   static struct link_map_offsets *lmp = NULL;
2307 
2308   if (lmp == NULL)
2309     {
2310       lmp = &lmo;
2311 
2312       lmo.r_version_offset = 0;
2313       lmo.r_version_size = 4;
2314       lmo.r_map_offset = 8;
2315       lmo.r_brk_offset = 16;
2316       lmo.r_ldsomap_offset = 40;
2317 
2318       /* Everything we need is in the first 40 bytes.  */
2319       lmo.link_map_size = 40;
2320       lmo.l_addr_offset = 0;
2321       lmo.l_name_offset = 8;
2322       lmo.l_ld_offset = 16;
2323       lmo.l_next_offset = 24;
2324       lmo.l_prev_offset = 32;
2325     }
2326 
2327   return lmp;
2328 }
2329 
2330 
2331 struct target_so_ops svr4_so_ops;
2332 
2333 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
2334    different rule for symbol lookup.  The lookup begins here in the DSO, not in
2335    the main executable.  */
2336 
2337 static struct symbol *
2338 elf_lookup_lib_symbol (const struct objfile *objfile,
2339 		       const char *name,
2340 		       const domain_enum domain)
2341 {
2342   bfd *abfd;
2343 
2344   if (objfile == symfile_objfile)
2345     abfd = exec_bfd;
2346   else
2347     {
2348       /* OBJFILE should have been passed as the non-debug one.  */
2349       gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2350 
2351       abfd = objfile->obfd;
2352     }
2353 
2354   if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2355     return NULL;
2356 
2357   return lookup_global_symbol_from_objfile (objfile, name, domain);
2358 }
2359 
2360 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2361 
2362 void
2363 _initialize_svr4_solib (void)
2364 {
2365   solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2366   solib_svr4_pspace_data
2367     = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2368 
2369   svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2370   svr4_so_ops.free_so = svr4_free_so;
2371   svr4_so_ops.clear_solib = svr4_clear_solib;
2372   svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2373   svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2374   svr4_so_ops.current_sos = svr4_current_sos;
2375   svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2376   svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2377   svr4_so_ops.bfd_open = solib_bfd_open;
2378   svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2379   svr4_so_ops.same = svr4_same;
2380   svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2381 }
2382