1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger. 2 3 Copyright (C) 1990-2014 Free Software Foundation, Inc. 4 5 This file is part of GDB. 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 3 of the License, or 10 (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, 13 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 GNU General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 19 20 #include "defs.h" 21 22 #include "elf/external.h" 23 #include "elf/common.h" 24 #include "elf/mips.h" 25 26 #include "symtab.h" 27 #include "bfd.h" 28 #include "symfile.h" 29 #include "objfiles.h" 30 #include "gdbcore.h" 31 #include "target.h" 32 #include "inferior.h" 33 #include "regcache.h" 34 #include "gdbthread.h" 35 #include "observer.h" 36 37 #include "gdb_assert.h" 38 39 #include "solist.h" 40 #include "solib.h" 41 #include "solib-svr4.h" 42 43 #include "bfd-target.h" 44 #include "elf-bfd.h" 45 #include "exec.h" 46 #include "auxv.h" 47 #include "exceptions.h" 48 #include "gdb_bfd.h" 49 #include "probe.h" 50 51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void); 52 static int svr4_have_link_map_offsets (void); 53 static void svr4_relocate_main_executable (void); 54 static void svr4_free_library_list (void *p_list); 55 56 /* Link map info to include in an allocated so_list entry. */ 57 58 struct lm_info 59 { 60 /* Amount by which addresses in the binary should be relocated to 61 match the inferior. The direct inferior value is L_ADDR_INFERIOR. 62 When prelinking is involved and the prelink base address changes, 63 we may need a different offset - the recomputed offset is in L_ADDR. 64 It is commonly the same value. It is cached as we want to warn about 65 the difference and compute it only once. L_ADDR is valid 66 iff L_ADDR_P. */ 67 CORE_ADDR l_addr, l_addr_inferior; 68 unsigned int l_addr_p : 1; 69 70 /* The target location of lm. */ 71 CORE_ADDR lm_addr; 72 73 /* Values read in from inferior's fields of the same name. */ 74 CORE_ADDR l_ld, l_next, l_prev, l_name; 75 }; 76 77 /* On SVR4 systems, a list of symbols in the dynamic linker where 78 GDB can try to place a breakpoint to monitor shared library 79 events. 80 81 If none of these symbols are found, or other errors occur, then 82 SVR4 systems will fall back to using a symbol as the "startup 83 mapping complete" breakpoint address. */ 84 85 static const char * const solib_break_names[] = 86 { 87 "r_debug_state", 88 "_r_debug_state", 89 "_dl_debug_state", 90 "rtld_db_dlactivity", 91 "__dl_rtld_db_dlactivity", 92 "_rtld_debug_state", 93 94 NULL 95 }; 96 97 static const char * const bkpt_names[] = 98 { 99 "_start", 100 "__start", 101 "main", 102 NULL 103 }; 104 105 static const char * const main_name_list[] = 106 { 107 "main_$main", 108 NULL 109 }; 110 111 /* What to do when a probe stop occurs. */ 112 113 enum probe_action 114 { 115 /* Something went seriously wrong. Stop using probes and 116 revert to using the older interface. */ 117 PROBES_INTERFACE_FAILED, 118 119 /* No action is required. The shared object list is still 120 valid. */ 121 DO_NOTHING, 122 123 /* The shared object list should be reloaded entirely. 
*/ 124 FULL_RELOAD, 125 126 /* Attempt to incrementally update the shared object list. If 127 the update fails or is not possible, fall back to reloading 128 the list in full. */ 129 UPDATE_OR_RELOAD, 130 }; 131 132 /* A probe's name and its associated action. */ 133 134 struct probe_info 135 { 136 /* The name of the probe. */ 137 const char *name; 138 139 /* What to do when a probe stop occurs. */ 140 enum probe_action action; 141 }; 142 143 /* A list of named probes and their associated actions. If all 144 probes are present in the dynamic linker then the probes-based 145 interface will be used. */ 146 147 static const struct probe_info probe_info[] = 148 { 149 { "init_start", DO_NOTHING }, 150 { "init_complete", FULL_RELOAD }, 151 { "map_start", DO_NOTHING }, 152 { "map_failed", DO_NOTHING }, 153 { "reloc_complete", UPDATE_OR_RELOAD }, 154 { "unmap_start", DO_NOTHING }, 155 { "unmap_complete", FULL_RELOAD }, 156 }; 157 158 #define NUM_PROBES ARRAY_SIZE (probe_info) 159 160 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent 161 the same shared library. */ 162 163 static int 164 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name) 165 { 166 if (strcmp (gdb_so_name, inferior_so_name) == 0) 167 return 1; 168 169 /* On Solaris, when starting inferior we think that dynamic linker is 170 /usr/lib/ld.so.1, but later on, the table of loaded shared libraries 171 contains /lib/ld.so.1. Sometimes one file is a link to another, but 172 sometimes they have identical content, but are not linked to each 173 other. We don't restrict this check for Solaris, but the chances 174 of running into this situation elsewhere are very low. */ 175 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0 176 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0) 177 return 1; 178 179 /* Similarly, we observed the same issue with sparc64, but with 180 different locations. 
*/ 181 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0 182 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0) 183 return 1; 184 185 return 0; 186 } 187 188 static int 189 svr4_same (struct so_list *gdb, struct so_list *inferior) 190 { 191 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name)); 192 } 193 194 static struct lm_info * 195 lm_info_read (CORE_ADDR lm_addr) 196 { 197 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets (); 198 gdb_byte *lm; 199 struct lm_info *lm_info; 200 struct cleanup *back_to; 201 202 lm = xmalloc (lmo->link_map_size); 203 back_to = make_cleanup (xfree, lm); 204 205 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0) 206 { 207 warning (_("Error reading shared library list entry at %s"), 208 paddress (target_gdbarch (), lm_addr)), 209 lm_info = NULL; 210 } 211 else 212 { 213 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 214 215 lm_info = xzalloc (sizeof (*lm_info)); 216 lm_info->lm_addr = lm_addr; 217 218 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset], 219 ptr_type); 220 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type); 221 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset], 222 ptr_type); 223 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset], 224 ptr_type); 225 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset], 226 ptr_type); 227 } 228 229 do_cleanups (back_to); 230 231 return lm_info; 232 } 233 234 static int 235 has_lm_dynamic_from_link_map (void) 236 { 237 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets (); 238 239 return lmo->l_ld_offset >= 0; 240 } 241 242 static CORE_ADDR 243 lm_addr_check (const struct so_list *so, bfd *abfd) 244 { 245 if (!so->lm_info->l_addr_p) 246 { 247 struct bfd_section *dyninfo_sect; 248 CORE_ADDR l_addr, l_dynaddr, dynaddr; 249 250 l_addr = so->lm_info->l_addr_inferior; 251 252 if (! abfd || ! has_lm_dynamic_from_link_map ()) 253 goto set_addr; 254 255 l_dynaddr = so->lm_info->l_ld; 256 257 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic"); 258 if (dyninfo_sect == NULL) 259 goto set_addr; 260 261 dynaddr = bfd_section_vma (abfd, dyninfo_sect); 262 263 if (dynaddr + l_addr != l_dynaddr) 264 { 265 CORE_ADDR align = 0x1000; 266 CORE_ADDR minpagesize = align; 267 268 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour) 269 { 270 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header; 271 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr; 272 int i; 273 274 align = 1; 275 276 for (i = 0; i < ehdr->e_phnum; i++) 277 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align) 278 align = phdr[i].p_align; 279 280 minpagesize = get_elf_backend_data (abfd)->minpagesize; 281 } 282 283 /* Turn it into a mask. */ 284 align--; 285 286 /* If the changes match the alignment requirements, we 287 assume we're using a core file that was generated by the 288 same binary, just prelinked with a different base offset. 289 If it doesn't match, we may have a different binary, the 290 same binary with the dynamic table loaded at an unrelated 291 location, or anything, really. To avoid regressions, 292 don't adjust the base offset in the latter case, although 293 odds are that, if things really changed, debugging won't 294 quite work. 295 296 One could expect more the condition 297 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0) 298 but the one below is relaxed for PPC. The PPC kernel supports 299 either 4k or 64k page sizes. 
To be prepared for 64k pages, 300 PPC ELF files are built using an alignment requirement of 64k. 301 However, when running on a kernel supporting 4k pages, the memory 302 mapping of the library may not actually happen on a 64k boundary! 303 304 (In the usual case where (l_addr & align) == 0, this check is 305 equivalent to the possibly expected check above.) 306 307 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */ 308 309 l_addr = l_dynaddr - dynaddr; 310 311 if ((l_addr & (minpagesize - 1)) == 0 312 && (l_addr & align) == ((l_dynaddr - dynaddr) & align)) 313 { 314 if (info_verbose) 315 printf_unfiltered (_("Using PIC (Position Independent Code) " 316 "prelink displacement %s for \"%s\".\n"), 317 paddress (target_gdbarch (), l_addr), 318 so->so_name); 319 } 320 else 321 { 322 /* There is no way to verify the library file matches. prelink 323 can during prelinking of an unprelinked file (or unprelinking 324 of a prelinked file) shift the DYNAMIC segment by arbitrary 325 offset without any page size alignment. There is no way to 326 find out the ELF header and/or Program Headers for a limited 327 verification if it they match. One could do a verification 328 of the DYNAMIC segment. Still the found address is the best 329 one GDB could find. */ 330 331 warning (_(".dynamic section for \"%s\" " 332 "is not at the expected address " 333 "(wrong library or version mismatch?)"), so->so_name); 334 } 335 } 336 337 set_addr: 338 so->lm_info->l_addr = l_addr; 339 so->lm_info->l_addr_p = 1; 340 } 341 342 return so->lm_info->l_addr; 343 } 344 345 /* Per pspace SVR4 specific data. */ 346 347 struct svr4_info 348 { 349 CORE_ADDR debug_base; /* Base of dynamic linker structures. */ 350 351 /* Validity flag for debug_loader_offset. */ 352 int debug_loader_offset_p; 353 354 /* Load address for the dynamic linker, inferred. */ 355 CORE_ADDR debug_loader_offset; 356 357 /* Name of the dynamic linker, valid if debug_loader_offset_p. */ 358 char *debug_loader_name; 359 360 /* Load map address for the main executable. */ 361 CORE_ADDR main_lm_addr; 362 363 CORE_ADDR interp_text_sect_low; 364 CORE_ADDR interp_text_sect_high; 365 CORE_ADDR interp_plt_sect_low; 366 CORE_ADDR interp_plt_sect_high; 367 368 /* Nonzero if the list of objects was last obtained from the target 369 via qXfer:libraries-svr4:read. */ 370 int using_xfer; 371 372 /* Table of struct probe_and_action instances, used by the 373 probes-based interface to map breakpoint addresses to probes 374 and their associated actions. Lookup is performed using 375 probe_and_action->probe->address. */ 376 htab_t probes_table; 377 378 /* List of objects loaded into the inferior, used by the probes- 379 based interface. */ 380 struct so_list *solib_list; 381 }; 382 383 /* Per-program-space data key. */ 384 static const struct program_space_data *solib_svr4_pspace_data; 385 386 /* Free the probes table. */ 387 388 static void 389 free_probes_table (struct svr4_info *info) 390 { 391 if (info->probes_table == NULL) 392 return; 393 394 htab_delete (info->probes_table); 395 info->probes_table = NULL; 396 } 397 398 /* Free the solib list. */ 399 400 static void 401 free_solib_list (struct svr4_info *info) 402 { 403 svr4_free_library_list (&info->solib_list); 404 info->solib_list = NULL; 405 } 406 407 static void 408 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg) 409 { 410 struct svr4_info *info = arg; 411 412 free_probes_table (info); 413 free_solib_list (info); 414 415 xfree (info); 416 } 417 418 /* Get the current svr4 data. 
If none is found yet, add it now. This 419 function always returns a valid object. */ 420 421 static struct svr4_info * 422 get_svr4_info (void) 423 { 424 struct svr4_info *info; 425 426 info = program_space_data (current_program_space, solib_svr4_pspace_data); 427 if (info != NULL) 428 return info; 429 430 info = XZALLOC (struct svr4_info); 431 set_program_space_data (current_program_space, solib_svr4_pspace_data, info); 432 return info; 433 } 434 435 /* Local function prototypes */ 436 437 static int match_main (const char *); 438 439 /* Read program header TYPE from inferior memory. The header is found 440 by scanning the OS auxillary vector. 441 442 If TYPE == -1, return the program headers instead of the contents of 443 one program header. 444 445 Return a pointer to allocated memory holding the program header contents, 446 or NULL on failure. If sucessful, and unless P_SECT_SIZE is NULL, the 447 size of those contents is returned to P_SECT_SIZE. Likewise, the target 448 architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE. */ 449 450 static gdb_byte * 451 read_program_header (int type, int *p_sect_size, int *p_arch_size) 452 { 453 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ()); 454 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0; 455 int arch_size, sect_size; 456 CORE_ADDR sect_addr; 457 gdb_byte *buf; 458 int pt_phdr_p = 0; 459 460 /* Get required auxv elements from target. */ 461 if (target_auxv_search (¤t_target, AT_PHDR, &at_phdr) <= 0) 462 return 0; 463 if (target_auxv_search (¤t_target, AT_PHENT, &at_phent) <= 0) 464 return 0; 465 if (target_auxv_search (¤t_target, AT_PHNUM, &at_phnum) <= 0) 466 return 0; 467 if (!at_phdr || !at_phnum) 468 return 0; 469 470 /* Determine ELF architecture type. */ 471 if (at_phent == sizeof (Elf32_External_Phdr)) 472 arch_size = 32; 473 else if (at_phent == sizeof (Elf64_External_Phdr)) 474 arch_size = 64; 475 else 476 return 0; 477 478 /* Find the requested segment. */ 479 if (type == -1) 480 { 481 sect_addr = at_phdr; 482 sect_size = at_phent * at_phnum; 483 } 484 else if (arch_size == 32) 485 { 486 Elf32_External_Phdr phdr; 487 int i; 488 489 /* Search for requested PHDR. */ 490 for (i = 0; i < at_phnum; i++) 491 { 492 int p_type; 493 494 if (target_read_memory (at_phdr + i * sizeof (phdr), 495 (gdb_byte *)&phdr, sizeof (phdr))) 496 return 0; 497 498 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type, 499 4, byte_order); 500 501 if (p_type == PT_PHDR) 502 { 503 pt_phdr_p = 1; 504 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr, 505 4, byte_order); 506 } 507 508 if (p_type == type) 509 break; 510 } 511 512 if (i == at_phnum) 513 return 0; 514 515 /* Retrieve address and size. */ 516 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr, 517 4, byte_order); 518 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz, 519 4, byte_order); 520 } 521 else 522 { 523 Elf64_External_Phdr phdr; 524 int i; 525 526 /* Search for requested PHDR. */ 527 for (i = 0; i < at_phnum; i++) 528 { 529 int p_type; 530 531 if (target_read_memory (at_phdr + i * sizeof (phdr), 532 (gdb_byte *)&phdr, sizeof (phdr))) 533 return 0; 534 535 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type, 536 4, byte_order); 537 538 if (p_type == PT_PHDR) 539 { 540 pt_phdr_p = 1; 541 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr, 542 8, byte_order); 543 } 544 545 if (p_type == type) 546 break; 547 } 548 549 if (i == at_phnum) 550 return 0; 551 552 /* Retrieve address and size. 
*/ 553 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr, 554 8, byte_order); 555 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz, 556 8, byte_order); 557 } 558 559 /* PT_PHDR is optional, but we really need it 560 for PIE to make this work in general. */ 561 562 if (pt_phdr_p) 563 { 564 /* at_phdr is real address in memory. pt_phdr is what pheader says it is. 565 Relocation offset is the difference between the two. */ 566 sect_addr = sect_addr + (at_phdr - pt_phdr); 567 } 568 569 /* Read in requested program header. */ 570 buf = xmalloc (sect_size); 571 if (target_read_memory (sect_addr, buf, sect_size)) 572 { 573 xfree (buf); 574 return NULL; 575 } 576 577 if (p_arch_size) 578 *p_arch_size = arch_size; 579 if (p_sect_size) 580 *p_sect_size = sect_size; 581 582 return buf; 583 } 584 585 586 /* Return program interpreter string. */ 587 static char * 588 find_program_interpreter (void) 589 { 590 gdb_byte *buf = NULL; 591 592 /* If we have an exec_bfd, use its section table. */ 593 if (exec_bfd 594 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour) 595 { 596 struct bfd_section *interp_sect; 597 598 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp"); 599 if (interp_sect != NULL) 600 { 601 int sect_size = bfd_section_size (exec_bfd, interp_sect); 602 603 buf = xmalloc (sect_size); 604 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size); 605 } 606 } 607 608 /* If we didn't find it, use the target auxillary vector. */ 609 if (!buf) 610 buf = read_program_header (PT_INTERP, NULL, NULL); 611 612 return (char *) buf; 613 } 614 615 616 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is 617 returned and the corresponding PTR is set. */ 618 619 static int 620 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr) 621 { 622 int arch_size, step, sect_size; 623 long dyn_tag; 624 CORE_ADDR dyn_ptr, dyn_addr; 625 gdb_byte *bufend, *bufstart, *buf; 626 Elf32_External_Dyn *x_dynp_32; 627 Elf64_External_Dyn *x_dynp_64; 628 struct bfd_section *sect; 629 struct target_section *target_section; 630 631 if (abfd == NULL) 632 return 0; 633 634 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour) 635 return 0; 636 637 arch_size = bfd_get_arch_size (abfd); 638 if (arch_size == -1) 639 return 0; 640 641 /* Find the start address of the .dynamic section. */ 642 sect = bfd_get_section_by_name (abfd, ".dynamic"); 643 if (sect == NULL) 644 return 0; 645 646 for (target_section = current_target_sections->sections; 647 target_section < current_target_sections->sections_end; 648 target_section++) 649 if (sect == target_section->the_bfd_section) 650 break; 651 if (target_section < current_target_sections->sections_end) 652 dyn_addr = target_section->addr; 653 else 654 { 655 /* ABFD may come from OBJFILE acting only as a symbol file without being 656 loaded into the target (see add_symbol_file_command). This case is 657 such fallback to the file VMA address without the possibility of 658 having the section relocated to its actual in-memory address. */ 659 660 dyn_addr = bfd_section_vma (abfd, sect); 661 } 662 663 /* Read in .dynamic from the BFD. We will get the actual value 664 from memory later. */ 665 sect_size = bfd_section_size (abfd, sect); 666 buf = bufstart = alloca (sect_size); 667 if (!bfd_get_section_contents (abfd, sect, 668 buf, 0, sect_size)) 669 return 0; 670 671 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */ 672 step = (arch_size == 32) ? 
sizeof (Elf32_External_Dyn) 673 : sizeof (Elf64_External_Dyn); 674 for (bufend = buf + sect_size; 675 buf < bufend; 676 buf += step) 677 { 678 if (arch_size == 32) 679 { 680 x_dynp_32 = (Elf32_External_Dyn *) buf; 681 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag); 682 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr); 683 } 684 else 685 { 686 x_dynp_64 = (Elf64_External_Dyn *) buf; 687 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag); 688 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr); 689 } 690 if (dyn_tag == DT_NULL) 691 return 0; 692 if (dyn_tag == dyntag) 693 { 694 /* If requested, try to read the runtime value of this .dynamic 695 entry. */ 696 if (ptr) 697 { 698 struct type *ptr_type; 699 gdb_byte ptr_buf[8]; 700 CORE_ADDR ptr_addr; 701 702 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 703 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8; 704 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0) 705 dyn_ptr = extract_typed_address (ptr_buf, ptr_type); 706 *ptr = dyn_ptr; 707 } 708 return 1; 709 } 710 } 711 712 return 0; 713 } 714 715 /* Scan for DYNTAG in .dynamic section of the target's main executable, 716 found by consulting the OS auxillary vector. If DYNTAG is found 1 is 717 returned and the corresponding PTR is set. */ 718 719 static int 720 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr) 721 { 722 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ()); 723 int sect_size, arch_size, step; 724 long dyn_tag; 725 CORE_ADDR dyn_ptr; 726 gdb_byte *bufend, *bufstart, *buf; 727 728 /* Read in .dynamic section. */ 729 buf = bufstart = read_program_header (PT_DYNAMIC, §_size, &arch_size); 730 if (!buf) 731 return 0; 732 733 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */ 734 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn) 735 : sizeof (Elf64_External_Dyn); 736 for (bufend = buf + sect_size; 737 buf < bufend; 738 buf += step) 739 { 740 if (arch_size == 32) 741 { 742 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf; 743 744 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag, 745 4, byte_order); 746 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr, 747 4, byte_order); 748 } 749 else 750 { 751 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf; 752 753 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag, 754 8, byte_order); 755 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr, 756 8, byte_order); 757 } 758 if (dyn_tag == DT_NULL) 759 break; 760 761 if (dyn_tag == dyntag) 762 { 763 if (ptr) 764 *ptr = dyn_ptr; 765 766 xfree (bufstart); 767 return 1; 768 } 769 } 770 771 xfree (bufstart); 772 return 0; 773 } 774 775 /* Locate the base address of dynamic linker structs for SVR4 elf 776 targets. 777 778 For SVR4 elf targets the address of the dynamic linker's runtime 779 structure is contained within the dynamic info section in the 780 executable file. The dynamic section is also mapped into the 781 inferior address space. Because the runtime loader fills in the 782 real address before starting the inferior, we have to read in the 783 dynamic info section from the inferior address space. 784 If there are any errors while trying to find the address, we 785 silently return 0, otherwise the found address is returned. */ 786 787 static CORE_ADDR 788 elf_locate_base (void) 789 { 790 struct minimal_symbol *msymbol; 791 CORE_ADDR dyn_ptr; 792 793 /* Look for DT_MIPS_RLD_MAP first. 
MIPS executables use this 794 instead of DT_DEBUG, although they sometimes contain an unused 795 DT_DEBUG. */ 796 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr) 797 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr)) 798 { 799 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 800 gdb_byte *pbuf; 801 int pbuf_size = TYPE_LENGTH (ptr_type); 802 803 pbuf = alloca (pbuf_size); 804 /* DT_MIPS_RLD_MAP contains a pointer to the address 805 of the dynamic link structure. */ 806 if (target_read_memory (dyn_ptr, pbuf, pbuf_size)) 807 return 0; 808 return extract_typed_address (pbuf, ptr_type); 809 } 810 811 /* Find DT_DEBUG. */ 812 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr) 813 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr)) 814 return dyn_ptr; 815 816 /* This may be a static executable. Look for the symbol 817 conventionally named _r_debug, as a last resort. */ 818 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile); 819 if (msymbol != NULL) 820 return SYMBOL_VALUE_ADDRESS (msymbol); 821 822 /* DT_DEBUG entry not found. */ 823 return 0; 824 } 825 826 /* Locate the base address of dynamic linker structs. 827 828 For both the SunOS and SVR4 shared library implementations, if the 829 inferior executable has been linked dynamically, there is a single 830 address somewhere in the inferior's data space which is the key to 831 locating all of the dynamic linker's runtime structures. This 832 address is the value of the debug base symbol. The job of this 833 function is to find and return that address, or to return 0 if there 834 is no such address (the executable is statically linked for example). 835 836 For SunOS, the job is almost trivial, since the dynamic linker and 837 all of it's structures are statically linked to the executable at 838 link time. Thus the symbol for the address we are looking for has 839 already been added to the minimal symbol table for the executable's 840 objfile at the time the symbol file's symbols were read, and all we 841 have to do is look it up there. Note that we explicitly do NOT want 842 to find the copies in the shared library. 843 844 The SVR4 version is a bit more complicated because the address 845 is contained somewhere in the dynamic info section. We have to go 846 to a lot more work to discover the address of the debug base symbol. 847 Because of this complexity, we cache the value we find and return that 848 value on subsequent invocations. Note there is no copy in the 849 executable symbol tables. */ 850 851 static CORE_ADDR 852 locate_base (struct svr4_info *info) 853 { 854 /* Check to see if we have a currently valid address, and if so, avoid 855 doing all this work again and just return the cached address. If 856 we have no cached address, try to locate it in the dynamic info 857 section for ELF executables. There's no point in doing any of this 858 though if we don't have some link map offsets to work with. */ 859 860 if (info->debug_base == 0 && svr4_have_link_map_offsets ()) 861 info->debug_base = elf_locate_base (); 862 return info->debug_base; 863 } 864 865 /* Find the first element in the inferior's dynamic link map, and 866 return its address in the inferior. Return zero if the address 867 could not be determined. 868 869 FIXME: Perhaps we should validate the info somehow, perhaps by 870 checking r_version for a known version number, or r_state for 871 RT_CONSISTENT. 
*/ 872 873 static CORE_ADDR 874 solib_svr4_r_map (struct svr4_info *info) 875 { 876 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets (); 877 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 878 CORE_ADDR addr = 0; 879 volatile struct gdb_exception ex; 880 881 TRY_CATCH (ex, RETURN_MASK_ERROR) 882 { 883 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset, 884 ptr_type); 885 } 886 exception_print (gdb_stderr, ex); 887 return addr; 888 } 889 890 /* Find r_brk from the inferior's debug base. */ 891 892 static CORE_ADDR 893 solib_svr4_r_brk (struct svr4_info *info) 894 { 895 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets (); 896 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 897 898 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset, 899 ptr_type); 900 } 901 902 /* Find the link map for the dynamic linker (if it is not in the 903 normal list of loaded shared objects). */ 904 905 static CORE_ADDR 906 solib_svr4_r_ldsomap (struct svr4_info *info) 907 { 908 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets (); 909 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 910 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ()); 911 ULONGEST version; 912 913 /* Check version, and return zero if `struct r_debug' doesn't have 914 the r_ldsomap member. */ 915 version 916 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset, 917 lmo->r_version_size, byte_order); 918 if (version < 2 || lmo->r_ldsomap_offset == -1) 919 return 0; 920 921 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset, 922 ptr_type); 923 } 924 925 /* On Solaris systems with some versions of the dynamic linker, 926 ld.so's l_name pointer points to the SONAME in the string table 927 rather than into writable memory. So that GDB can find shared 928 libraries when loading a core file generated by gcore, ensure that 929 memory areas containing the l_name string are saved in the core 930 file. */ 931 932 static int 933 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size) 934 { 935 struct svr4_info *info; 936 CORE_ADDR ldsomap; 937 struct so_list *new; 938 struct cleanup *old_chain; 939 CORE_ADDR name_lm; 940 941 info = get_svr4_info (); 942 943 info->debug_base = 0; 944 locate_base (info); 945 if (!info->debug_base) 946 return 0; 947 948 ldsomap = solib_svr4_r_ldsomap (info); 949 if (!ldsomap) 950 return 0; 951 952 new = XZALLOC (struct so_list); 953 old_chain = make_cleanup (xfree, new); 954 new->lm_info = lm_info_read (ldsomap); 955 make_cleanup (xfree, new->lm_info); 956 name_lm = new->lm_info ? new->lm_info->l_name : 0; 957 do_cleanups (old_chain); 958 959 return (name_lm >= vaddr && name_lm < vaddr + size); 960 } 961 962 /* Implement the "open_symbol_file_object" target_so_ops method. 963 964 If no open symbol file, attempt to locate and open the main symbol 965 file. On SVR4 systems, this is the first link map entry. If its 966 name is here, we can open it. Useful when attaching to a process 967 without first loading its symbol file. 
*/ 968 969 static int 970 open_symbol_file_object (void *from_ttyp) 971 { 972 CORE_ADDR lm, l_name; 973 char *filename; 974 int errcode; 975 int from_tty = *(int *)from_ttyp; 976 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets (); 977 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr; 978 int l_name_size = TYPE_LENGTH (ptr_type); 979 gdb_byte *l_name_buf = xmalloc (l_name_size); 980 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf); 981 struct svr4_info *info = get_svr4_info (); 982 983 if (symfile_objfile) 984 if (!query (_("Attempt to reload symbols from process? "))) 985 { 986 do_cleanups (cleanups); 987 return 0; 988 } 989 990 /* Always locate the debug struct, in case it has moved. */ 991 info->debug_base = 0; 992 if (locate_base (info) == 0) 993 { 994 do_cleanups (cleanups); 995 return 0; /* failed somehow... */ 996 } 997 998 /* First link map member should be the executable. */ 999 lm = solib_svr4_r_map (info); 1000 if (lm == 0) 1001 { 1002 do_cleanups (cleanups); 1003 return 0; /* failed somehow... */ 1004 } 1005 1006 /* Read address of name from target memory to GDB. */ 1007 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size); 1008 1009 /* Convert the address to host format. */ 1010 l_name = extract_typed_address (l_name_buf, ptr_type); 1011 1012 if (l_name == 0) 1013 { 1014 do_cleanups (cleanups); 1015 return 0; /* No filename. */ 1016 } 1017 1018 /* Now fetch the filename from target memory. */ 1019 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode); 1020 make_cleanup (xfree, filename); 1021 1022 if (errcode) 1023 { 1024 warning (_("failed to read exec filename from attached file: %s"), 1025 safe_strerror (errcode)); 1026 do_cleanups (cleanups); 1027 return 0; 1028 } 1029 1030 /* Have a pathname: read the symbol file. */ 1031 symbol_file_add_main (filename, from_tty); 1032 1033 do_cleanups (cleanups); 1034 return 1; 1035 } 1036 1037 /* Data exchange structure for the XML parser as returned by 1038 svr4_current_sos_via_xfer_libraries. */ 1039 1040 struct svr4_library_list 1041 { 1042 struct so_list *head, **tailp; 1043 1044 /* Inferior address of struct link_map used for the main executable. It is 1045 NULL if not known. */ 1046 CORE_ADDR main_lm; 1047 }; 1048 1049 /* Implementation for target_so_ops.free_so. */ 1050 1051 static void 1052 svr4_free_so (struct so_list *so) 1053 { 1054 xfree (so->lm_info); 1055 } 1056 1057 /* Implement target_so_ops.clear_so. */ 1058 1059 static void 1060 svr4_clear_so (struct so_list *so) 1061 { 1062 if (so->lm_info != NULL) 1063 so->lm_info->l_addr_p = 0; 1064 } 1065 1066 /* Free so_list built so far (called via cleanup). */ 1067 1068 static void 1069 svr4_free_library_list (void *p_list) 1070 { 1071 struct so_list *list = *(struct so_list **) p_list; 1072 1073 while (list != NULL) 1074 { 1075 struct so_list *next = list->next; 1076 1077 free_so (list); 1078 list = next; 1079 } 1080 } 1081 1082 /* Copy library list. 
*/ 1083 1084 static struct so_list * 1085 svr4_copy_library_list (struct so_list *src) 1086 { 1087 struct so_list *dst = NULL; 1088 struct so_list **link = &dst; 1089 1090 while (src != NULL) 1091 { 1092 struct so_list *new; 1093 1094 new = xmalloc (sizeof (struct so_list)); 1095 memcpy (new, src, sizeof (struct so_list)); 1096 1097 new->lm_info = xmalloc (sizeof (struct lm_info)); 1098 memcpy (new->lm_info, src->lm_info, sizeof (struct lm_info)); 1099 1100 new->next = NULL; 1101 *link = new; 1102 link = &new->next; 1103 1104 src = src->next; 1105 } 1106 1107 return dst; 1108 } 1109 1110 #ifdef HAVE_LIBEXPAT 1111 1112 #include "xml-support.h" 1113 1114 /* Handle the start of a <library> element. Note: new elements are added 1115 at the tail of the list, keeping the list in order. */ 1116 1117 static void 1118 library_list_start_library (struct gdb_xml_parser *parser, 1119 const struct gdb_xml_element *element, 1120 void *user_data, VEC(gdb_xml_value_s) *attributes) 1121 { 1122 struct svr4_library_list *list = user_data; 1123 const char *name = xml_find_attribute (attributes, "name")->value; 1124 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value; 1125 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value; 1126 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value; 1127 struct so_list *new_elem; 1128 1129 new_elem = XZALLOC (struct so_list); 1130 new_elem->lm_info = XZALLOC (struct lm_info); 1131 new_elem->lm_info->lm_addr = *lmp; 1132 new_elem->lm_info->l_addr_inferior = *l_addrp; 1133 new_elem->lm_info->l_ld = *l_ldp; 1134 1135 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1); 1136 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0; 1137 strcpy (new_elem->so_original_name, new_elem->so_name); 1138 1139 *list->tailp = new_elem; 1140 list->tailp = &new_elem->next; 1141 } 1142 1143 /* Handle the start of a <library-list-svr4> element. */ 1144 1145 static void 1146 svr4_library_list_start_list (struct gdb_xml_parser *parser, 1147 const struct gdb_xml_element *element, 1148 void *user_data, VEC(gdb_xml_value_s) *attributes) 1149 { 1150 struct svr4_library_list *list = user_data; 1151 const char *version = xml_find_attribute (attributes, "version")->value; 1152 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm"); 1153 1154 if (strcmp (version, "1.0") != 0) 1155 gdb_xml_error (parser, 1156 _("SVR4 Library list has unsupported version \"%s\""), 1157 version); 1158 1159 if (main_lm) 1160 list->main_lm = *(ULONGEST *) main_lm->value; 1161 } 1162 1163 /* The allowed elements and attributes for an XML library list. 1164 The root element is a <library-list>. 
*/ 1165 1166 static const struct gdb_xml_attribute svr4_library_attributes[] = 1167 { 1168 { "name", GDB_XML_AF_NONE, NULL, NULL }, 1169 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, 1170 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, 1171 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, 1172 { NULL, GDB_XML_AF_NONE, NULL, NULL } 1173 }; 1174 1175 static const struct gdb_xml_element svr4_library_list_children[] = 1176 { 1177 { 1178 "library", svr4_library_attributes, NULL, 1179 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, 1180 library_list_start_library, NULL 1181 }, 1182 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } 1183 }; 1184 1185 static const struct gdb_xml_attribute svr4_library_list_attributes[] = 1186 { 1187 { "version", GDB_XML_AF_NONE, NULL, NULL }, 1188 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL }, 1189 { NULL, GDB_XML_AF_NONE, NULL, NULL } 1190 }; 1191 1192 static const struct gdb_xml_element svr4_library_list_elements[] = 1193 { 1194 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children, 1195 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL }, 1196 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } 1197 }; 1198 1199 /* Parse qXfer:libraries:read packet into *SO_LIST_RETURN. Return 1 if 1200 1201 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such 1202 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be 1203 empty, caller is responsible for freeing all its entries. */ 1204 1205 static int 1206 svr4_parse_libraries (const char *document, struct svr4_library_list *list) 1207 { 1208 struct cleanup *back_to = make_cleanup (svr4_free_library_list, 1209 &list->head); 1210 1211 memset (list, 0, sizeof (*list)); 1212 list->tailp = &list->head; 1213 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd", 1214 svr4_library_list_elements, document, list) == 0) 1215 { 1216 /* Parsed successfully, keep the result. */ 1217 discard_cleanups (back_to); 1218 return 1; 1219 } 1220 1221 do_cleanups (back_to); 1222 return 0; 1223 } 1224 1225 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet. 1226 1227 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such 1228 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be 1229 empty, caller is responsible for freeing all its entries. 1230 1231 Note that ANNEX must be NULL if the remote does not explicitly allow 1232 qXfer:libraries-svr4:read packets with non-empty annexes. Support for 1233 this can be checked using target_augmented_libraries_svr4_read (). */ 1234 1235 static int 1236 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list, 1237 const char *annex) 1238 { 1239 char *svr4_library_document; 1240 int result; 1241 struct cleanup *back_to; 1242 1243 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ()); 1244 1245 /* Fetch the list of shared libraries. 
*/ 1246 svr4_library_document = target_read_stralloc (¤t_target, 1247 TARGET_OBJECT_LIBRARIES_SVR4, 1248 annex); 1249 if (svr4_library_document == NULL) 1250 return 0; 1251 1252 back_to = make_cleanup (xfree, svr4_library_document); 1253 result = svr4_parse_libraries (svr4_library_document, list); 1254 do_cleanups (back_to); 1255 1256 return result; 1257 } 1258 1259 #else 1260 1261 static int 1262 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list, 1263 const char *annex) 1264 { 1265 return 0; 1266 } 1267 1268 #endif 1269 1270 /* If no shared library information is available from the dynamic 1271 linker, build a fallback list from other sources. */ 1272 1273 static struct so_list * 1274 svr4_default_sos (void) 1275 { 1276 struct svr4_info *info = get_svr4_info (); 1277 struct so_list *new; 1278 1279 if (!info->debug_loader_offset_p) 1280 return NULL; 1281 1282 new = XZALLOC (struct so_list); 1283 1284 new->lm_info = xzalloc (sizeof (struct lm_info)); 1285 1286 /* Nothing will ever check the other fields if we set l_addr_p. */ 1287 new->lm_info->l_addr = info->debug_loader_offset; 1288 new->lm_info->l_addr_p = 1; 1289 1290 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1); 1291 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0'; 1292 strcpy (new->so_original_name, new->so_name); 1293 1294 return new; 1295 } 1296 1297 /* Read the whole inferior libraries chain starting at address LM. 1298 Expect the first entry in the chain's previous entry to be PREV_LM. 1299 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the 1300 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according 1301 to it. Returns nonzero upon success. If zero is returned the 1302 entries stored to LINK_PTR_PTR are still valid although they may 1303 represent only part of the inferior library list. */ 1304 1305 static int 1306 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm, 1307 struct so_list ***link_ptr_ptr, int ignore_first) 1308 { 1309 struct so_list *first = NULL; 1310 CORE_ADDR next_lm; 1311 1312 for (; lm != 0; prev_lm = lm, lm = next_lm) 1313 { 1314 struct so_list *new; 1315 struct cleanup *old_chain; 1316 int errcode; 1317 char *buffer; 1318 1319 new = XZALLOC (struct so_list); 1320 old_chain = make_cleanup_free_so (new); 1321 1322 new->lm_info = lm_info_read (lm); 1323 if (new->lm_info == NULL) 1324 { 1325 do_cleanups (old_chain); 1326 return 0; 1327 } 1328 1329 next_lm = new->lm_info->l_next; 1330 1331 if (new->lm_info->l_prev != prev_lm) 1332 { 1333 warning (_("Corrupted shared library list: %s != %s"), 1334 paddress (target_gdbarch (), prev_lm), 1335 paddress (target_gdbarch (), new->lm_info->l_prev)); 1336 do_cleanups (old_chain); 1337 return 0; 1338 } 1339 1340 /* For SVR4 versions, the first entry in the link map is for the 1341 inferior executable, so we must ignore it. For some versions of 1342 SVR4, it has no name. For others (Solaris 2.3 for example), it 1343 does have a name, so we can no longer use a missing name to 1344 decide when to ignore it. */ 1345 if (ignore_first && new->lm_info->l_prev == 0) 1346 { 1347 struct svr4_info *info = get_svr4_info (); 1348 1349 first = new; 1350 info->main_lm_addr = new->lm_info->lm_addr; 1351 do_cleanups (old_chain); 1352 continue; 1353 } 1354 1355 /* Extract this shared object's name. 
*/ 1356 target_read_string (new->lm_info->l_name, &buffer, 1357 SO_NAME_MAX_PATH_SIZE - 1, &errcode); 1358 if (errcode != 0) 1359 { 1360 /* If this entry's l_name address matches that of the 1361 inferior executable, then this is not a normal shared 1362 object, but (most likely) a vDSO. In this case, silently 1363 skip it; otherwise emit a warning. */ 1364 if (first == NULL 1365 || new->lm_info->l_name != first->lm_info->l_name) 1366 warning (_("Can't read pathname for load map: %s."), 1367 safe_strerror (errcode)); 1368 do_cleanups (old_chain); 1369 continue; 1370 } 1371 1372 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1); 1373 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0'; 1374 strcpy (new->so_original_name, new->so_name); 1375 xfree (buffer); 1376 1377 /* If this entry has no name, or its name matches the name 1378 for the main executable, don't include it in the list. */ 1379 if (! new->so_name[0] || match_main (new->so_name)) 1380 { 1381 do_cleanups (old_chain); 1382 continue; 1383 } 1384 1385 discard_cleanups (old_chain); 1386 new->next = 0; 1387 **link_ptr_ptr = new; 1388 *link_ptr_ptr = &new->next; 1389 } 1390 1391 return 1; 1392 } 1393 1394 /* Read the full list of currently loaded shared objects directly 1395 from the inferior, without referring to any libraries read and 1396 stored by the probes interface. Handle special cases relating 1397 to the first elements of the list. */ 1398 1399 static struct so_list * 1400 svr4_current_sos_direct (struct svr4_info *info) 1401 { 1402 CORE_ADDR lm; 1403 struct so_list *head = NULL; 1404 struct so_list **link_ptr = &head; 1405 struct cleanup *back_to; 1406 int ignore_first; 1407 struct svr4_library_list library_list; 1408 1409 /* Fall back to manual examination of the target if the packet is not 1410 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp 1411 tests a case where gdbserver cannot find the shared libraries list while 1412 GDB itself is able to find it via SYMFILE_OBJFILE. 1413 1414 Unfortunately statically linked inferiors will also fall back through this 1415 suboptimal code path. */ 1416 1417 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list, 1418 NULL); 1419 if (info->using_xfer) 1420 { 1421 if (library_list.main_lm) 1422 info->main_lm_addr = library_list.main_lm; 1423 1424 return library_list.head ? library_list.head : svr4_default_sos (); 1425 } 1426 1427 /* Always locate the debug struct, in case it has moved. */ 1428 info->debug_base = 0; 1429 locate_base (info); 1430 1431 /* If we can't find the dynamic linker's base structure, this 1432 must not be a dynamically linked executable. Hmm. */ 1433 if (! info->debug_base) 1434 return svr4_default_sos (); 1435 1436 /* Assume that everything is a library if the dynamic loader was loaded 1437 late by a static executable. */ 1438 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL) 1439 ignore_first = 0; 1440 else 1441 ignore_first = 1; 1442 1443 back_to = make_cleanup (svr4_free_library_list, &head); 1444 1445 /* Walk the inferior's link map list, and build our list of 1446 `struct so_list' nodes. */ 1447 lm = solib_svr4_r_map (info); 1448 if (lm) 1449 svr4_read_so_list (lm, 0, &link_ptr, ignore_first); 1450 1451 /* On Solaris, the dynamic linker is not in the normal list of 1452 shared objects, so make sure we pick it up too. Having 1453 symbol information for the dynamic linker is quite crucial 1454 for skipping dynamic linker resolver code. 
*/ 1455 lm = solib_svr4_r_ldsomap (info); 1456 if (lm) 1457 svr4_read_so_list (lm, 0, &link_ptr, 0); 1458 1459 discard_cleanups (back_to); 1460 1461 if (head == NULL) 1462 return svr4_default_sos (); 1463 1464 return head; 1465 } 1466 1467 /* Implement the "current_sos" target_so_ops method. */ 1468 1469 static struct so_list * 1470 svr4_current_sos (void) 1471 { 1472 struct svr4_info *info = get_svr4_info (); 1473 1474 /* If the solib list has been read and stored by the probes 1475 interface then we return a copy of the stored list. */ 1476 if (info->solib_list != NULL) 1477 return svr4_copy_library_list (info->solib_list); 1478 1479 /* Otherwise obtain the solib list directly from the inferior. */ 1480 return svr4_current_sos_direct (info); 1481 } 1482 1483 /* Get the address of the link_map for a given OBJFILE. */ 1484 1485 CORE_ADDR 1486 svr4_fetch_objfile_link_map (struct objfile *objfile) 1487 { 1488 struct so_list *so; 1489 struct svr4_info *info = get_svr4_info (); 1490 1491 /* Cause svr4_current_sos() to be run if it hasn't been already. */ 1492 if (info->main_lm_addr == 0) 1493 solib_add (NULL, 0, ¤t_target, auto_solib_add); 1494 1495 /* svr4_current_sos() will set main_lm_addr for the main executable. */ 1496 if (objfile == symfile_objfile) 1497 return info->main_lm_addr; 1498 1499 /* The other link map addresses may be found by examining the list 1500 of shared libraries. */ 1501 for (so = master_so_list (); so; so = so->next) 1502 if (so->objfile == objfile) 1503 return so->lm_info->lm_addr; 1504 1505 /* Not found! */ 1506 return 0; 1507 } 1508 1509 /* On some systems, the only way to recognize the link map entry for 1510 the main executable file is by looking at its name. Return 1511 non-zero iff SONAME matches one of the known main executable names. */ 1512 1513 static int 1514 match_main (const char *soname) 1515 { 1516 const char * const *mainp; 1517 1518 for (mainp = main_name_list; *mainp != NULL; mainp++) 1519 { 1520 if (strcmp (soname, *mainp) == 0) 1521 return (1); 1522 } 1523 1524 return (0); 1525 } 1526 1527 /* Return 1 if PC lies in the dynamic symbol resolution code of the 1528 SVR4 run time loader. */ 1529 1530 int 1531 svr4_in_dynsym_resolve_code (CORE_ADDR pc) 1532 { 1533 struct svr4_info *info = get_svr4_info (); 1534 1535 return ((pc >= info->interp_text_sect_low 1536 && pc < info->interp_text_sect_high) 1537 || (pc >= info->interp_plt_sect_low 1538 && pc < info->interp_plt_sect_high) 1539 || in_plt_section (pc) 1540 || in_gnu_ifunc_stub (pc)); 1541 } 1542 1543 /* Given an executable's ABFD and target, compute the entry-point 1544 address. */ 1545 1546 static CORE_ADDR 1547 exec_entry_point (struct bfd *abfd, struct target_ops *targ) 1548 { 1549 CORE_ADDR addr; 1550 1551 /* KevinB wrote ... for most targets, the address returned by 1552 bfd_get_start_address() is the entry point for the start 1553 function. But, for some targets, bfd_get_start_address() returns 1554 the address of a function descriptor from which the entry point 1555 address may be extracted. This address is extracted by 1556 gdbarch_convert_from_func_ptr_addr(). The method 1557 gdbarch_convert_from_func_ptr_addr() is the merely the identify 1558 function for targets which don't use function descriptors. */ 1559 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (), 1560 bfd_get_start_address (abfd), 1561 targ); 1562 return gdbarch_addr_bits_remove (target_gdbarch (), addr); 1563 } 1564 1565 /* A probe and its associated action. 
*/ 1566 1567 struct probe_and_action 1568 { 1569 /* The probe. */ 1570 struct probe *probe; 1571 1572 /* The action. */ 1573 enum probe_action action; 1574 }; 1575 1576 /* Returns a hash code for the probe_and_action referenced by p. */ 1577 1578 static hashval_t 1579 hash_probe_and_action (const void *p) 1580 { 1581 const struct probe_and_action *pa = p; 1582 1583 return (hashval_t) pa->probe->address; 1584 } 1585 1586 /* Returns non-zero if the probe_and_actions referenced by p1 and p2 1587 are equal. */ 1588 1589 static int 1590 equal_probe_and_action (const void *p1, const void *p2) 1591 { 1592 const struct probe_and_action *pa1 = p1; 1593 const struct probe_and_action *pa2 = p2; 1594 1595 return pa1->probe->address == pa2->probe->address; 1596 } 1597 1598 /* Register a solib event probe and its associated action in the 1599 probes table. */ 1600 1601 static void 1602 register_solib_event_probe (struct probe *probe, enum probe_action action) 1603 { 1604 struct svr4_info *info = get_svr4_info (); 1605 struct probe_and_action lookup, *pa; 1606 void **slot; 1607 1608 /* Create the probes table, if necessary. */ 1609 if (info->probes_table == NULL) 1610 info->probes_table = htab_create_alloc (1, hash_probe_and_action, 1611 equal_probe_and_action, 1612 xfree, xcalloc, xfree); 1613 1614 lookup.probe = probe; 1615 slot = htab_find_slot (info->probes_table, &lookup, INSERT); 1616 gdb_assert (*slot == HTAB_EMPTY_ENTRY); 1617 1618 pa = XCNEW (struct probe_and_action); 1619 pa->probe = probe; 1620 pa->action = action; 1621 1622 *slot = pa; 1623 } 1624 1625 /* Get the solib event probe at the specified location, and the 1626 action associated with it. Returns NULL if no solib event probe 1627 was found. */ 1628 1629 static struct probe_and_action * 1630 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address) 1631 { 1632 struct probe lookup_probe; 1633 struct probe_and_action lookup; 1634 void **slot; 1635 1636 lookup_probe.address = address; 1637 lookup.probe = &lookup_probe; 1638 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT); 1639 1640 if (slot == NULL) 1641 return NULL; 1642 1643 return (struct probe_and_action *) *slot; 1644 } 1645 1646 /* Decide what action to take when the specified solib event probe is 1647 hit. */ 1648 1649 static enum probe_action 1650 solib_event_probe_action (struct probe_and_action *pa) 1651 { 1652 enum probe_action action; 1653 unsigned probe_argc; 1654 struct frame_info *frame = get_current_frame (); 1655 1656 action = pa->action; 1657 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED) 1658 return action; 1659 1660 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD); 1661 1662 /* Check that an appropriate number of arguments has been supplied. 1663 We expect: 1664 arg0: Lmid_t lmid (mandatory) 1665 arg1: struct r_debug *debug_base (mandatory) 1666 arg2: struct link_map *new (optional, for incremental updates) */ 1667 probe_argc = get_probe_argument_count (pa->probe, frame); 1668 if (probe_argc == 2) 1669 action = FULL_RELOAD; 1670 else if (probe_argc < 2) 1671 action = PROBES_INTERFACE_FAILED; 1672 1673 return action; 1674 } 1675 1676 /* Populate the shared object list by reading the entire list of 1677 shared objects from the inferior. Handle special cases relating 1678 to the first elements of the list. Returns nonzero on success. 
*/ 1679 1680 static int 1681 solist_update_full (struct svr4_info *info) 1682 { 1683 free_solib_list (info); 1684 info->solib_list = svr4_current_sos_direct (info); 1685 1686 return 1; 1687 } 1688 1689 /* Update the shared object list starting from the link-map entry 1690 passed by the linker in the probe's third argument. Returns 1691 nonzero if the list was successfully updated, or zero to indicate 1692 failure. */ 1693 1694 static int 1695 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm) 1696 { 1697 struct so_list *tail; 1698 CORE_ADDR prev_lm; 1699 1700 /* svr4_current_sos_direct contains logic to handle a number of 1701 special cases relating to the first elements of the list. To 1702 avoid duplicating this logic we defer to solist_update_full 1703 if the list is empty. */ 1704 if (info->solib_list == NULL) 1705 return 0; 1706 1707 /* Fall back to a full update if we are using a remote target 1708 that does not support incremental transfers. */ 1709 if (info->using_xfer && !target_augmented_libraries_svr4_read ()) 1710 return 0; 1711 1712 /* Walk to the end of the list. */ 1713 for (tail = info->solib_list; tail->next != NULL; tail = tail->next) 1714 /* Nothing. */; 1715 prev_lm = tail->lm_info->lm_addr; 1716 1717 /* Read the new objects. */ 1718 if (info->using_xfer) 1719 { 1720 struct svr4_library_list library_list; 1721 char annex[64]; 1722 1723 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s", 1724 phex_nz (lm, sizeof (lm)), 1725 phex_nz (prev_lm, sizeof (prev_lm))); 1726 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex)) 1727 return 0; 1728 1729 tail->next = library_list.head; 1730 } 1731 else 1732 { 1733 struct so_list **link = &tail->next; 1734 1735 /* IGNORE_FIRST may safely be set to zero here because the 1736 above check and deferral to solist_update_full ensures 1737 that this call to svr4_read_so_list will never see the 1738 first element. */ 1739 if (!svr4_read_so_list (lm, prev_lm, &link, 0)) 1740 return 0; 1741 } 1742 1743 return 1; 1744 } 1745 1746 /* Disable the probes-based linker interface and revert to the 1747 original interface. We don't reset the breakpoints as the 1748 ones set up for the probes-based interface are adequate. */ 1749 1750 static void 1751 disable_probes_interface_cleanup (void *arg) 1752 { 1753 struct svr4_info *info = get_svr4_info (); 1754 1755 warning (_("Probes-based dynamic linker interface failed.\n" 1756 "Reverting to original interface.\n")); 1757 1758 free_probes_table (info); 1759 free_solib_list (info); 1760 } 1761 1762 /* Update the solib list as appropriate when using the 1763 probes-based linker interface. Do nothing if using the 1764 standard interface. */ 1765 1766 static void 1767 svr4_handle_solib_event (void) 1768 { 1769 struct svr4_info *info = get_svr4_info (); 1770 struct probe_and_action *pa; 1771 enum probe_action action; 1772 struct cleanup *old_chain, *usm_chain; 1773 struct value *val; 1774 CORE_ADDR pc, debug_base, lm = 0; 1775 int is_initial_ns; 1776 struct frame_info *frame = get_current_frame (); 1777 1778 /* Do nothing if not using the probes interface. */ 1779 if (info->probes_table == NULL) 1780 return; 1781 1782 /* If anything goes wrong we revert to the original linker 1783 interface. 
*/ 1784 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL); 1785 1786 pc = regcache_read_pc (get_current_regcache ()); 1787 pa = solib_event_probe_at (info, pc); 1788 if (pa == NULL) 1789 { 1790 do_cleanups (old_chain); 1791 return; 1792 } 1793 1794 action = solib_event_probe_action (pa); 1795 if (action == PROBES_INTERFACE_FAILED) 1796 { 1797 do_cleanups (old_chain); 1798 return; 1799 } 1800 1801 if (action == DO_NOTHING) 1802 { 1803 discard_cleanups (old_chain); 1804 return; 1805 } 1806 1807 /* evaluate_probe_argument looks up symbols in the dynamic linker 1808 using find_pc_section. find_pc_section is accelerated by a cache 1809 called the section map. The section map is invalidated every 1810 time a shared library is loaded or unloaded, and if the inferior 1811 is generating a lot of shared library events then the section map 1812 will be updated every time svr4_handle_solib_event is called. 1813 We called find_pc_section in svr4_create_solib_event_breakpoints, 1814 so we can guarantee that the dynamic linker's sections are in the 1815 section map. We can therefore inhibit section map updates across 1816 these calls to evaluate_probe_argument and save a lot of time. */ 1817 inhibit_section_map_updates (current_program_space); 1818 usm_chain = make_cleanup (resume_section_map_updates_cleanup, 1819 current_program_space); 1820 1821 val = evaluate_probe_argument (pa->probe, 1, frame); 1822 if (val == NULL) 1823 { 1824 do_cleanups (old_chain); 1825 return; 1826 } 1827 1828 debug_base = value_as_address (val); 1829 if (debug_base == 0) 1830 { 1831 do_cleanups (old_chain); 1832 return; 1833 } 1834 1835 /* Always locate the debug struct, in case it moved. */ 1836 info->debug_base = 0; 1837 if (locate_base (info) == 0) 1838 { 1839 do_cleanups (old_chain); 1840 return; 1841 } 1842 1843 /* GDB does not currently support libraries loaded via dlmopen 1844 into namespaces other than the initial one. We must ignore 1845 any namespace other than the initial namespace here until 1846 support for this is added to GDB. */ 1847 if (debug_base != info->debug_base) 1848 action = DO_NOTHING; 1849 1850 if (action == UPDATE_OR_RELOAD) 1851 { 1852 val = evaluate_probe_argument (pa->probe, 2, frame); 1853 if (val != NULL) 1854 lm = value_as_address (val); 1855 1856 if (lm == 0) 1857 action = FULL_RELOAD; 1858 } 1859 1860 /* Resume section map updates. */ 1861 do_cleanups (usm_chain); 1862 1863 if (action == UPDATE_OR_RELOAD) 1864 { 1865 if (!solist_update_incremental (info, lm)) 1866 action = FULL_RELOAD; 1867 } 1868 1869 if (action == FULL_RELOAD) 1870 { 1871 if (!solist_update_full (info)) 1872 { 1873 do_cleanups (old_chain); 1874 return; 1875 } 1876 } 1877 1878 discard_cleanups (old_chain); 1879 } 1880 1881 /* Helper function for svr4_update_solib_event_breakpoints. */ 1882 1883 static int 1884 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg) 1885 { 1886 struct bp_location *loc; 1887 1888 if (b->type != bp_shlib_event) 1889 { 1890 /* Continue iterating. 
*/
      return 0;
    }

  for (loc = b->loc; loc != NULL; loc = loc->next)
    {
      struct svr4_info *info;
      struct probe_and_action *pa;

      info = program_space_data (loc->pspace, solib_svr4_pspace_data);
      if (info == NULL || info->probes_table == NULL)
        continue;

      pa = solib_event_probe_at (info, loc->address);
      if (pa == NULL)
        continue;

      if (pa->action == DO_NOTHING)
        {
          if (b->enable_state == bp_disabled && stop_on_solib_events)
            enable_breakpoint (b);
          else if (b->enable_state == bp_enabled && !stop_on_solib_events)
            disable_breakpoint (b);
        }

      break;
    }

  /* Continue iterating.  */
  return 0;
}

/* Enable or disable optional solib event breakpoints as appropriate.
   Called whenever stop_on_solib_events is changed.  */

static void
svr4_update_solib_event_breakpoints (void)
{
  iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
}

/* Create and register solib event breakpoints.  PROBES is an array
   of NUM_PROBES elements, each of which is a vector of probes.  A
   solib event breakpoint will be created and registered for each
   probe.  */

static void
svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
                               VEC (probe_p) **probes)
{
  int i;

  for (i = 0; i < NUM_PROBES; i++)
    {
      enum probe_action action = probe_info[i].action;
      struct probe *probe;
      int ix;

      for (ix = 0;
           VEC_iterate (probe_p, probes[i], ix, probe);
           ++ix)
        {
          create_solib_event_breakpoint (gdbarch, probe->address);
          register_solib_event_probe (probe, action);
        }
    }

  svr4_update_solib_event_breakpoints ();
}
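
/* Note on the probes themselves (a sketch, not something this file
   depends on): they are SystemTap SDT markers compiled into the
   dynamic linker under the "rtld" provider, which recent glibc
   releases ship.  Such a marker is conceptually declared with the
   <sys/sdt.h> macros, e.g.

       #include <sys/sdt.h>
       ...
       STAP_PROBE (rtld, map_failed);

   (glibc actually wraps this in a macro of its own, and most of the
   probes also pass arguments, which svr4_handle_solib_event reads with
   evaluate_probe_argument).  GDB's "info probes" command lists the
   markers a given ld.so provides.  */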

/* Both the SunOS and the SVR4 dynamic linkers call a marker function
   before and after mapping and unmapping shared libraries.  The sole
   purpose of this method is to allow debuggers to set a breakpoint so
   they can track these changes.

   Some versions of the glibc dynamic linker contain named probes
   to allow more fine-grained stopping.  Given the address of the
   original marker function, this function attempts to find these
   probes, and if found, sets breakpoints on those instead.  If the
   probes aren't found, a single breakpoint is set on the original
   marker function.  */

static void
svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
                                     CORE_ADDR address)
{
  struct obj_section *os;

  os = find_pc_section (address);
  if (os != NULL)
    {
      int with_prefix;

      for (with_prefix = 0; with_prefix <= 1; with_prefix++)
        {
          VEC (probe_p) *probes[NUM_PROBES];
          int all_probes_found = 1;
          int checked_can_use_probe_arguments = 0;
          int i;

          memset (probes, 0, sizeof (probes));
          for (i = 0; i < NUM_PROBES; i++)
            {
              const char *name = probe_info[i].name;
              struct probe *p;
              char buf[32];

              /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
                 shipped with an early version of the probes code in
                 which the probes' names were prefixed with "rtld_"
                 and the "map_failed" probe did not exist.  The
                 locations of the probes are otherwise the same, so
                 we check for probes with prefixed names if probes
                 with unprefixed names are not present.  */
              if (with_prefix)
                {
                  xsnprintf (buf, sizeof (buf), "rtld_%s", name);
                  name = buf;
                }

              probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);

              /* The "map_failed" probe did not exist in early
                 versions of the probes code in which the probes'
                 names were prefixed with "rtld_".  */
              if (strcmp (name, "rtld_map_failed") == 0)
                continue;

              if (VEC_empty (probe_p, probes[i]))
                {
                  all_probes_found = 0;
                  break;
                }

              /* Ensure probe arguments can be evaluated.  */
              if (!checked_can_use_probe_arguments)
                {
                  p = VEC_index (probe_p, probes[i], 0);
                  if (!can_evaluate_probe_arguments (p))
                    {
                      all_probes_found = 0;
                      break;
                    }
                  checked_can_use_probe_arguments = 1;
                }
            }

          if (all_probes_found)
            svr4_create_probe_breakpoints (gdbarch, probes);

          for (i = 0; i < NUM_PROBES; i++)
            VEC_free (probe_p, probes[i]);

          if (all_probes_found)
            return;
        }
    }

  create_solib_event_breakpoint (gdbarch, address);
}

/* Helper function for gdb_bfd_lookup_symbol.  */

static int
cmp_name_and_sec_flags (asymbol *sym, void *data)
{
  return (strcmp (sym->name, (const char *) data) == 0
          && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
}

/* Arrange for dynamic linker to hit breakpoint.

   Both the SunOS and the SVR4 dynamic linkers have, as part of their
   debugger interface, support for arranging for the inferior to hit
   a breakpoint after mapping in the shared libraries.  This function
   enables that breakpoint.

   For SunOS, there is a special flag location (in_debugger) which we
   set to 1.  When the dynamic linker sees this flag set, it will set
   a breakpoint at a location known only to itself, after saving the
   original contents of that place and the breakpoint address itself,
   in its own internal structures.  When we resume the inferior, it
   will eventually take a SIGTRAP when it runs into the breakpoint.
   We handle this (in a different place) by restoring the contents of
   the breakpointed location (which is only known after it stops),
   chasing around to locate the shared libraries that have been
   loaded, then resuming.

   For SVR4, the debugger interface structure contains a member (r_brk)
   which is statically initialized at the time the shared library is
   built, to the offset of a function (_r_debug_state) which is
   guaranteed to be called once before mapping in a library, and again
   when the mapping is complete.  At the time we are examining this
   member, it contains only the unrelocated offset of the function, so
   we have to do our own relocation.  Later, when the dynamic linker
   actually runs, it relocates r_brk to be the actual address of
   _r_debug_state ().

   The debugger interface structure also contains an enumeration which
   is set to either RT_ADD or RT_DELETE prior to changing the mapping,
   depending upon whether the library is being mapped or unmapped,
   and then set to RT_CONSISTENT after the library is mapped/unmapped.  */
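
/* For reference, the SVR4-style debugger interface described above is
   declared in <link.h> roughly as follows.  This is an illustrative
   sketch only; GDB never includes the inferior's header, it reads the
   fields through the link_map_offsets tables defined later in this
   file:

       struct r_debug
       {
         int r_version;             // Protocol version.
         struct link_map *r_map;    // Head of the list of loaded objects.
         ElfW(Addr) r_brk;          // Address of the marker function.
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;       // Base address of the dynamic linker.
       };  */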

static int
enable_break (struct svr4_info *info, int from_tty)
{
  struct minimal_symbol *msymbol;
  const char * const *bkpt_namep;
  asection *interp_sect;
  char *interp_name;
  CORE_ADDR sym_addr;

  info->interp_text_sect_low = info->interp_text_sect_high = 0;
  info->interp_plt_sect_low = info->interp_plt_sect_high = 0;

  /* If we already have a shared library list in the target, and
     r_debug contains r_brk, set the breakpoint there - this should
     mean r_brk has already been relocated.  Assume the dynamic linker
     is the object containing r_brk.  */

  solib_add (NULL, from_tty, &current_target, auto_solib_add);
  sym_addr = 0;
  if (info->debug_base && solib_svr4_r_map (info) != 0)
    sym_addr = solib_svr4_r_brk (info);

  if (sym_addr != 0)
    {
      struct obj_section *os;

      sym_addr = gdbarch_addr_bits_remove
        (target_gdbarch (),
         gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
                                             sym_addr,
                                             &current_target));

      /* On at least some versions of Solaris there's a dynamic relocation
         on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
         we get control before the dynamic linker has self-relocated.
         Check if SYM_ADDR is in a known section; if it is, assume we can
         trust its value.  This is just a heuristic, though; it could go
         away or be replaced if it gets in the way.

         On ARM we need to know whether the ISA of rtld_db_dlactivity (or
         however it's spelled in your particular system) is ARM or Thumb.
         That knowledge is encoded in the address: if it's Thumb, the low
         bit is 1.  However, we've stripped that info above and it's not
         clear what all the consequences are of passing a
         non-addr_bits_remove'd address to
         svr4_create_solib_event_breakpoints.  The call to find_pc_section
         verifies we know about the address and have some hope of
         computing the right kind of breakpoint to use (via symbol info).
         It does mean that GDB needs to be pointed at a non-stripped
         version of the dynamic linker in order to obtain information it
         already knows about.  Sigh.  */

      os = find_pc_section (sym_addr);
      if (os != NULL)
        {
          /* Record the relocated start and end address of the dynamic
             linker text and plt section for svr4_in_dynsym_resolve_code.  */
          bfd *tmp_bfd;
          CORE_ADDR load_addr;

          tmp_bfd = os->objfile->obfd;
          load_addr = ANOFFSET (os->objfile->section_offsets,
                                SECT_OFF_TEXT (os->objfile));

          interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
          if (interp_sect)
            {
              info->interp_text_sect_low =
                bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
              info->interp_text_sect_high =
                info->interp_text_sect_low
                + bfd_section_size (tmp_bfd, interp_sect);
            }
          interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
          if (interp_sect)
            {
              info->interp_plt_sect_low =
                bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
              info->interp_plt_sect_high =
                info->interp_plt_sect_low
                + bfd_section_size (tmp_bfd, interp_sect);
            }

          svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
          return 1;
        }
    }

  /* Find the program interpreter; if not found, warn the user and drop
     into the old breakpoint-at-symbol code.  */
  interp_name = find_program_interpreter ();
  if (interp_name)
    {
      CORE_ADDR load_addr = 0;
      int load_addr_found = 0;
      int loader_found_in_list = 0;
      struct so_list *so;
      bfd *tmp_bfd = NULL;
      struct target_ops *tmp_bfd_target;
      volatile struct gdb_exception ex;

      sym_addr = 0;

      /* Now we need to figure out where the dynamic linker was
         loaded so that we can load its symbols and place a breakpoint
         in the dynamic linker itself.

         This address is stored on the stack.  However, I've been unable
         to find any magic formula to find it for Solaris (appears to
         be trivial on GNU/Linux).  Therefore, we have to try an alternate
         mechanism to find the dynamic linker's base address.  */

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          tmp_bfd = solib_bfd_open (interp_name);
        }
      if (tmp_bfd == NULL)
        goto bkpt_at_symbol;

      /* Now convert the TMP_BFD into a target.  That way both target
         and BFD operations can be used.  */
      tmp_bfd_target = target_bfd_reopen (tmp_bfd);
      /* target_bfd_reopen acquired its own reference, so we can
         release ours now.  */
      gdb_bfd_unref (tmp_bfd);

      /* On a running target, we can get the dynamic linker's base
         address from the shared library table.  */
      so = master_so_list ();
      while (so)
        {
          if (svr4_same_1 (interp_name, so->so_original_name))
            {
              load_addr_found = 1;
              loader_found_in_list = 1;
              load_addr = lm_addr_check (so, tmp_bfd);
              break;
            }
          so = so->next;
        }

      /* If we were not able to find the base address of the loader
         from our so_list, then try using the AT_BASE auxiliary entry.  */
      if (!load_addr_found)
        if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
          {
            int addr_bit = gdbarch_addr_bit (target_gdbarch ());

            /* Ensure LOAD_ADDR has the proper sign in its possible upper
               bits so that later `+ load_addr' additions overflow the
               CORE_ADDR width instead of creating invalid addresses like
               0x101234567 for 32-bit inferiors on 64-bit GDB.  */

            if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
              {
                CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
                CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
                                                              tmp_bfd_target);

                gdb_assert (load_addr < space_size);

                /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a
                   prelinked 64-bit ld.so with a 32-bit executable; it
                   should not happen.  */

                if (tmp_entry_point < space_size
                    && tmp_entry_point + load_addr >= space_size)
                  load_addr -= space_size;
              }

            load_addr_found = 1;
          }
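
      /* A worked example of the adjustment above (numbers are purely
         illustrative): debugging a 32-bit inferior from a 64-bit GDB,
         suppose a prelinked ld.so has link-time addresses around
         0x4a800000 and the kernel reports AT_BASE as 0xb7700000.
         Adding the two exceeds 2^32, so LOAD_ADDR is reduced by
         SPACE_SIZE (2^32); the later 64-bit `+ load_addr' additions
         then wrap to the intended 32-bit values.  */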

      /* Otherwise we find the dynamic linker's base address by examining
         the current pc (which should point at the entry point for the
         dynamic linker) and subtracting the offset of the entry point.

         This is more fragile than the previous approaches, but is a good
         fallback method because it has actually been working well in
         most cases.  */
      if (!load_addr_found)
        {
          struct regcache *regcache
            = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());

          load_addr = (regcache_read_pc (regcache)
                       - exec_entry_point (tmp_bfd, tmp_bfd_target));
        }

      if (!loader_found_in_list)
        {
          info->debug_loader_name = xstrdup (interp_name);
          info->debug_loader_offset_p = 1;
          info->debug_loader_offset = load_addr;
          solib_add (NULL, from_tty, &current_target, auto_solib_add);
        }

      /* Record the relocated start and end address of the dynamic linker
         text and plt section for svr4_in_dynsym_resolve_code.  */
      interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
      if (interp_sect)
        {
          info->interp_text_sect_low =
            bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
          info->interp_text_sect_high =
            info->interp_text_sect_low
            + bfd_section_size (tmp_bfd, interp_sect);
        }
      interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
      if (interp_sect)
        {
          info->interp_plt_sect_low =
            bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
          info->interp_plt_sect_high =
            info->interp_plt_sect_low
            + bfd_section_size (tmp_bfd, interp_sect);
        }

      /* Now try to set a breakpoint in the dynamic linker.  */
      for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
        {
          sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
                                            (void *) *bkpt_namep);
          if (sym_addr != 0)
            break;
        }

      if (sym_addr != 0)
        /* Convert 'sym_addr' from a function pointer to an address.
           Because we pass tmp_bfd_target instead of the current
           target, this will always produce an unrelocated value.  */
        sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
                                                       sym_addr,
                                                       tmp_bfd_target);

      /* We're done with both the temporary bfd and target.  Closing
         the target closes the underlying bfd, because it holds the
         only remaining reference.  */
      target_close (tmp_bfd_target);

      if (sym_addr != 0)
        {
          svr4_create_solib_event_breakpoints (target_gdbarch (),
                                               load_addr + sym_addr);
          xfree (interp_name);
          return 1;
        }

      /* For whatever reason we couldn't set a breakpoint in the dynamic
         linker.  Warn and drop into the old code.  */
    bkpt_at_symbol:
      xfree (interp_name);
      warning (_("Unable to find dynamic linker breakpoint function.\n"
                 "GDB will be unable to debug shared library initializers\n"
                 "and track explicitly loaded dynamic code."));
    }

  /* Scan through the lists of symbols, trying to look up the symbol and
     set a breakpoint there.  Terminate the loop if and when we succeed.  */

  for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
    {
      msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
      if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
        {
          sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
          sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
                                                         sym_addr,
                                                         &current_target);
          svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
          return 1;
        }
    }

  if (interp_name != NULL && !current_inferior ()->attach_flag)
    {
      for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
        {
          msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
          if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
            {
              sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
              sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
                                                             sym_addr,
                                                             &current_target);
              svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
              return 1;
            }
        }
    }
  return 0;
}

/* Implement the "special_symbol_handling" target_so_ops method.  */

static void
svr4_special_symbol_handling (void)
{
  /* Nothing to do.  */
}

/* Read the ELF program headers from ABFD.  Return the contents and
   set *PHDRS_SIZE to the size of the program headers.  */

static gdb_byte *
read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
{
  Elf_Internal_Ehdr *ehdr;
  gdb_byte *buf;

  ehdr = elf_elfheader (abfd);

  *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
  if (*phdrs_size == 0)
    return NULL;

  buf = xmalloc (*phdrs_size);
  if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
      || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
    {
      xfree (buf);
      return NULL;
    }

  return buf;
}

/* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
   inferior's exec_bfd.  Otherwise return 0.

   We relocate all of the sections by the same amount.  This
   behavior is mandated by recent editions of the System V ABI.
   According to the System V Application Binary Interface,
   Edition 4.1, page 5-5:

     ...  Though the system chooses virtual addresses for
     individual processes, it maintains the segments' relative
     positions.  Because position-independent code uses relative
     addressing between segments, the difference between
     virtual addresses in memory must match the difference
     between virtual addresses in the file.  The difference
     between the virtual address of any segment in memory and
     the corresponding virtual address in the file is thus a
     single constant value for any one executable or shared
     object in a given process.  This difference is the base
     address.  One use of the base address is to relocate the
     memory image of the program during dynamic linking.

   The same language also appears in Edition 4.0 of the System V
   ABI and is left unspecified in some of the earlier editions.

   Decide if the objfile needs to be relocated.  As indicated above, we
   will only be here when execution is stopped.  But during attachment
   the PC can be at an arbitrary address, so regcache_read_pc can be
   misleading (contrary to the auxv AT_ENTRY value).  Moreover, for an
   executable with an interpreter section, regcache_read_pc would point
   to the interpreter and not the main executable.

   So, to summarize, relocations are necessary when the start address
   obtained from the executable is different from the address in the
   auxv AT_ENTRY entry.

   [ The astute reader will note that we also test to make sure that
   the executable in question has the DYNAMIC flag set.  It is my
   opinion that this test is unnecessary (undesirable even).  It
   was added to avoid inadvertent relocation of an executable
   whose e_type member in the ELF header is not ET_DYN.  There may
   be a time in the future when it is desirable to do relocations
   on other types of files as well in which case this condition
   should either be removed or modified to accommodate the new file
   type.  - Kevin, Nov 2000. ]  */
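
/* A concrete (purely illustrative) example of the displacement
   computed below: an x86-64 PIE whose ELF header records e_entry
   0x670 and which the kernel reports via AT_ENTRY as 0x555555554670
   yields a displacement of 0x555555554000, which satisfies the
   page-alignment check and is then applied uniformly to every
   section.  */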

static int
svr4_exec_displacement (CORE_ADDR *displacementp)
{
  /* ENTRY_POINT is a possible function descriptor - before
     a call to gdbarch_convert_from_func_ptr_addr.  */
  CORE_ADDR entry_point, displacement;

  if (exec_bfd == NULL)
    return 0;

  /* For ELF, BFD's DYNAMIC flag corresponds to ET_DYN and not ET_EXEC.
     Both shared libraries being executed themselves and PIE (Position
     Independent Executable) executables are ET_DYN; a fixed-address
     ET_EXEC executable is never displaced.  */

  if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
    return 0;

  if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
    return 0;

  displacement = entry_point - bfd_get_start_address (exec_bfd);

  /* Verify the DISPLACEMENT candidate complies with the required page
     alignment.  It is cheaper than the program headers comparison below.  */

  if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
    {
      const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);

      /* p_align of PT_LOAD segments does not specify any alignment but
         only congruency of addresses:
           p_offset % p_align == p_vaddr % p_align
         The kernel is free to load the executable with lower alignment.  */

      if ((displacement & (elf->minpagesize - 1)) != 0)
        return 0;
    }

  /* Verify that the auxiliary vector describes the same file as exec_bfd, by
     comparing their program headers.  If the program headers in the auxiliary
     vector do not match the program headers in the executable, then we are
     looking at a different file than the one used by the kernel - for
     instance, "gdb program" connected to "gdbserver :PORT ld.so program".  */

  if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
    {
      /* Be optimistic and clear OK only if GDB was able to verify the headers
         really do not match.  */
      int phdrs_size, phdrs2_size, ok = 1;
      gdb_byte *buf, *buf2;
      int arch_size;

      buf = read_program_header (-1, &phdrs_size, &arch_size);
      buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
      if (buf != NULL && buf2 != NULL)
        {
          enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());

          /* We are dealing with three different addresses.  EXEC_BFD
             represents the current address in the on-disk file.  Target
             memory content may be different from EXEC_BFD as the file may
             have been prelinked to a different address after the
             executable has been loaded.  Moreover the address of placement
             in target memory can be different from what the program
             headers in target memory say - this is the goal of PIE.
2523 2524 Detected DISPLACEMENT covers both the offsets of PIE placement and 2525 possible new prelink performed after start of the program. Here 2526 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory 2527 content offset for the verification purpose. */ 2528 2529 if (phdrs_size != phdrs2_size 2530 || bfd_get_arch_size (exec_bfd) != arch_size) 2531 ok = 0; 2532 else if (arch_size == 32 2533 && phdrs_size >= sizeof (Elf32_External_Phdr) 2534 && phdrs_size % sizeof (Elf32_External_Phdr) == 0) 2535 { 2536 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header; 2537 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr; 2538 CORE_ADDR displacement = 0; 2539 int i; 2540 2541 /* DISPLACEMENT could be found more easily by the difference of 2542 ehdr2->e_entry. But we haven't read the ehdr yet, and we 2543 already have enough information to compute that displacement 2544 with what we've read. */ 2545 2546 for (i = 0; i < ehdr2->e_phnum; i++) 2547 if (phdr2[i].p_type == PT_LOAD) 2548 { 2549 Elf32_External_Phdr *phdrp; 2550 gdb_byte *buf_vaddr_p, *buf_paddr_p; 2551 CORE_ADDR vaddr, paddr; 2552 CORE_ADDR displacement_vaddr = 0; 2553 CORE_ADDR displacement_paddr = 0; 2554 2555 phdrp = &((Elf32_External_Phdr *) buf)[i]; 2556 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr; 2557 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr; 2558 2559 vaddr = extract_unsigned_integer (buf_vaddr_p, 4, 2560 byte_order); 2561 displacement_vaddr = vaddr - phdr2[i].p_vaddr; 2562 2563 paddr = extract_unsigned_integer (buf_paddr_p, 4, 2564 byte_order); 2565 displacement_paddr = paddr - phdr2[i].p_paddr; 2566 2567 if (displacement_vaddr == displacement_paddr) 2568 displacement = displacement_vaddr; 2569 2570 break; 2571 } 2572 2573 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */ 2574 2575 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++) 2576 { 2577 Elf32_External_Phdr *phdrp; 2578 Elf32_External_Phdr *phdr2p; 2579 gdb_byte *buf_vaddr_p, *buf_paddr_p; 2580 CORE_ADDR vaddr, paddr; 2581 asection *plt2_asect; 2582 2583 phdrp = &((Elf32_External_Phdr *) buf)[i]; 2584 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr; 2585 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr; 2586 phdr2p = &((Elf32_External_Phdr *) buf2)[i]; 2587 2588 /* PT_GNU_STACK is an exception by being never relocated by 2589 prelink as its addresses are always zero. */ 2590 2591 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0) 2592 continue; 2593 2594 /* Check also other adjustment combinations - PR 11786. */ 2595 2596 vaddr = extract_unsigned_integer (buf_vaddr_p, 4, 2597 byte_order); 2598 vaddr -= displacement; 2599 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr); 2600 2601 paddr = extract_unsigned_integer (buf_paddr_p, 4, 2602 byte_order); 2603 paddr -= displacement; 2604 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr); 2605 2606 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0) 2607 continue; 2608 2609 /* Strip modifies the flags and alignment of PT_GNU_RELRO. 2610 CentOS-5 has problems with filesz, memsz as well. 2611 See PR 11786. 
*/ 2612 if (phdr2[i].p_type == PT_GNU_RELRO) 2613 { 2614 Elf32_External_Phdr tmp_phdr = *phdrp; 2615 Elf32_External_Phdr tmp_phdr2 = *phdr2p; 2616 2617 memset (tmp_phdr.p_filesz, 0, 4); 2618 memset (tmp_phdr.p_memsz, 0, 4); 2619 memset (tmp_phdr.p_flags, 0, 4); 2620 memset (tmp_phdr.p_align, 0, 4); 2621 memset (tmp_phdr2.p_filesz, 0, 4); 2622 memset (tmp_phdr2.p_memsz, 0, 4); 2623 memset (tmp_phdr2.p_flags, 0, 4); 2624 memset (tmp_phdr2.p_align, 0, 4); 2625 2626 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr)) 2627 == 0) 2628 continue; 2629 } 2630 2631 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */ 2632 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt"); 2633 if (plt2_asect) 2634 { 2635 int content2; 2636 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz; 2637 CORE_ADDR filesz; 2638 2639 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect) 2640 & SEC_HAS_CONTENTS) != 0; 2641 2642 filesz = extract_unsigned_integer (buf_filesz_p, 4, 2643 byte_order); 2644 2645 /* PLT2_ASECT is from on-disk file (exec_bfd) while 2646 FILESZ is from the in-memory image. */ 2647 if (content2) 2648 filesz += bfd_get_section_size (plt2_asect); 2649 else 2650 filesz -= bfd_get_section_size (plt2_asect); 2651 2652 store_unsigned_integer (buf_filesz_p, 4, byte_order, 2653 filesz); 2654 2655 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0) 2656 continue; 2657 } 2658 2659 ok = 0; 2660 break; 2661 } 2662 } 2663 else if (arch_size == 64 2664 && phdrs_size >= sizeof (Elf64_External_Phdr) 2665 && phdrs_size % sizeof (Elf64_External_Phdr) == 0) 2666 { 2667 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header; 2668 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr; 2669 CORE_ADDR displacement = 0; 2670 int i; 2671 2672 /* DISPLACEMENT could be found more easily by the difference of 2673 ehdr2->e_entry. But we haven't read the ehdr yet, and we 2674 already have enough information to compute that displacement 2675 with what we've read. */ 2676 2677 for (i = 0; i < ehdr2->e_phnum; i++) 2678 if (phdr2[i].p_type == PT_LOAD) 2679 { 2680 Elf64_External_Phdr *phdrp; 2681 gdb_byte *buf_vaddr_p, *buf_paddr_p; 2682 CORE_ADDR vaddr, paddr; 2683 CORE_ADDR displacement_vaddr = 0; 2684 CORE_ADDR displacement_paddr = 0; 2685 2686 phdrp = &((Elf64_External_Phdr *) buf)[i]; 2687 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr; 2688 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr; 2689 2690 vaddr = extract_unsigned_integer (buf_vaddr_p, 8, 2691 byte_order); 2692 displacement_vaddr = vaddr - phdr2[i].p_vaddr; 2693 2694 paddr = extract_unsigned_integer (buf_paddr_p, 8, 2695 byte_order); 2696 displacement_paddr = paddr - phdr2[i].p_paddr; 2697 2698 if (displacement_vaddr == displacement_paddr) 2699 displacement = displacement_vaddr; 2700 2701 break; 2702 } 2703 2704 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */ 2705 2706 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++) 2707 { 2708 Elf64_External_Phdr *phdrp; 2709 Elf64_External_Phdr *phdr2p; 2710 gdb_byte *buf_vaddr_p, *buf_paddr_p; 2711 CORE_ADDR vaddr, paddr; 2712 asection *plt2_asect; 2713 2714 phdrp = &((Elf64_External_Phdr *) buf)[i]; 2715 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr; 2716 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr; 2717 phdr2p = &((Elf64_External_Phdr *) buf2)[i]; 2718 2719 /* PT_GNU_STACK is an exception by being never relocated by 2720 prelink as its addresses are always zero. 
*/ 2721 2722 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0) 2723 continue; 2724 2725 /* Check also other adjustment combinations - PR 11786. */ 2726 2727 vaddr = extract_unsigned_integer (buf_vaddr_p, 8, 2728 byte_order); 2729 vaddr -= displacement; 2730 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr); 2731 2732 paddr = extract_unsigned_integer (buf_paddr_p, 8, 2733 byte_order); 2734 paddr -= displacement; 2735 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr); 2736 2737 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0) 2738 continue; 2739 2740 /* Strip modifies the flags and alignment of PT_GNU_RELRO. 2741 CentOS-5 has problems with filesz, memsz as well. 2742 See PR 11786. */ 2743 if (phdr2[i].p_type == PT_GNU_RELRO) 2744 { 2745 Elf64_External_Phdr tmp_phdr = *phdrp; 2746 Elf64_External_Phdr tmp_phdr2 = *phdr2p; 2747 2748 memset (tmp_phdr.p_filesz, 0, 8); 2749 memset (tmp_phdr.p_memsz, 0, 8); 2750 memset (tmp_phdr.p_flags, 0, 4); 2751 memset (tmp_phdr.p_align, 0, 8); 2752 memset (tmp_phdr2.p_filesz, 0, 8); 2753 memset (tmp_phdr2.p_memsz, 0, 8); 2754 memset (tmp_phdr2.p_flags, 0, 4); 2755 memset (tmp_phdr2.p_align, 0, 8); 2756 2757 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr)) 2758 == 0) 2759 continue; 2760 } 2761 2762 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */ 2763 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt"); 2764 if (plt2_asect) 2765 { 2766 int content2; 2767 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz; 2768 CORE_ADDR filesz; 2769 2770 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect) 2771 & SEC_HAS_CONTENTS) != 0; 2772 2773 filesz = extract_unsigned_integer (buf_filesz_p, 8, 2774 byte_order); 2775 2776 /* PLT2_ASECT is from on-disk file (exec_bfd) while 2777 FILESZ is from the in-memory image. */ 2778 if (content2) 2779 filesz += bfd_get_section_size (plt2_asect); 2780 else 2781 filesz -= bfd_get_section_size (plt2_asect); 2782 2783 store_unsigned_integer (buf_filesz_p, 8, byte_order, 2784 filesz); 2785 2786 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0) 2787 continue; 2788 } 2789 2790 ok = 0; 2791 break; 2792 } 2793 } 2794 else 2795 ok = 0; 2796 } 2797 2798 xfree (buf); 2799 xfree (buf2); 2800 2801 if (!ok) 2802 return 0; 2803 } 2804 2805 if (info_verbose) 2806 { 2807 /* It can be printed repeatedly as there is no easy way to check 2808 the executable symbols/file has been already relocated to 2809 displacement. */ 2810 2811 printf_unfiltered (_("Using PIE (Position Independent Executable) " 2812 "displacement %s for \"%s\".\n"), 2813 paddress (target_gdbarch (), displacement), 2814 bfd_get_filename (exec_bfd)); 2815 } 2816 2817 *displacementp = displacement; 2818 return 1; 2819 } 2820 2821 /* Relocate the main executable. This function should be called upon 2822 stopping the inferior process at the entry point to the program. 2823 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are 2824 different, the main executable is relocated by the proper amount. */ 2825 2826 static void 2827 svr4_relocate_main_executable (void) 2828 { 2829 CORE_ADDR displacement; 2830 2831 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS 2832 probably contains the offsets computed using the PIE displacement 2833 from the previous run, which of course are irrelevant for this run. 2834 So we need to determine the new PIE displacement and recompute the 2835 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS 2836 already contains pre-computed offsets. 
2837 2838 If we cannot compute the PIE displacement, either: 2839 2840 - The executable is not PIE. 2841 2842 - SYMFILE_OBJFILE does not match the executable started in the target. 2843 This can happen for main executable symbols loaded at the host while 2844 `ld.so --ld-args main-executable' is loaded in the target. 2845 2846 Then we leave the section offsets untouched and use them as is for 2847 this run. Either: 2848 2849 - These section offsets were properly reset earlier, and thus 2850 already contain the correct values. This can happen for instance 2851 when reconnecting via the remote protocol to a target that supports 2852 the `qOffsets' packet. 2853 2854 - The section offsets were not reset earlier, and the best we can 2855 hope is that the old offsets are still applicable to the new run. */ 2856 2857 if (! svr4_exec_displacement (&displacement)) 2858 return; 2859 2860 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file 2861 addresses. */ 2862 2863 if (symfile_objfile) 2864 { 2865 struct section_offsets *new_offsets; 2866 int i; 2867 2868 new_offsets = alloca (symfile_objfile->num_sections 2869 * sizeof (*new_offsets)); 2870 2871 for (i = 0; i < symfile_objfile->num_sections; i++) 2872 new_offsets->offsets[i] = displacement; 2873 2874 objfile_relocate (symfile_objfile, new_offsets); 2875 } 2876 else if (exec_bfd) 2877 { 2878 asection *asect; 2879 2880 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next) 2881 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index, 2882 (bfd_section_vma (exec_bfd, asect) 2883 + displacement)); 2884 } 2885 } 2886 2887 /* Implement the "create_inferior_hook" target_solib_ops method. 2888 2889 For SVR4 executables, this first instruction is either the first 2890 instruction in the dynamic linker (for dynamically linked 2891 executables) or the instruction at "start" for statically linked 2892 executables. For dynamically linked executables, the system 2893 first exec's /lib/libc.so.N, which contains the dynamic linker, 2894 and starts it running. The dynamic linker maps in any needed 2895 shared libraries, maps in the actual user executable, and then 2896 jumps to "start" in the user executable. 2897 2898 We can arrange to cooperate with the dynamic linker to discover the 2899 names of shared libraries that are dynamically linked, and the base 2900 addresses to which they are linked. 2901 2902 This function is responsible for discovering those names and 2903 addresses, and saving sufficient information about them to allow 2904 their symbols to be read at a later time. */ 2905 2906 static void 2907 svr4_solib_create_inferior_hook (int from_tty) 2908 { 2909 struct svr4_info *info; 2910 2911 info = get_svr4_info (); 2912 2913 /* Clear the probes-based interface's state. */ 2914 free_probes_table (info); 2915 free_solib_list (info); 2916 2917 /* Relocate the main executable if necessary. */ 2918 svr4_relocate_main_executable (); 2919 2920 /* No point setting a breakpoint in the dynamic linker if we can't 2921 hit it (e.g., a core file, or a trace file). 
*/ 2922 if (!target_has_execution) 2923 return; 2924 2925 if (!svr4_have_link_map_offsets ()) 2926 return; 2927 2928 if (!enable_break (info, from_tty)) 2929 return; 2930 } 2931 2932 static void 2933 svr4_clear_solib (void) 2934 { 2935 struct svr4_info *info; 2936 2937 info = get_svr4_info (); 2938 info->debug_base = 0; 2939 info->debug_loader_offset_p = 0; 2940 info->debug_loader_offset = 0; 2941 xfree (info->debug_loader_name); 2942 info->debug_loader_name = NULL; 2943 } 2944 2945 /* Clear any bits of ADDR that wouldn't fit in a target-format 2946 data pointer. "Data pointer" here refers to whatever sort of 2947 address the dynamic linker uses to manage its sections. At the 2948 moment, we don't support shared libraries on any processors where 2949 code and data pointers are different sizes. 2950 2951 This isn't really the right solution. What we really need here is 2952 a way to do arithmetic on CORE_ADDR values that respects the 2953 natural pointer/address correspondence. (For example, on the MIPS, 2954 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to 2955 sign-extend the value. There, simply truncating the bits above 2956 gdbarch_ptr_bit, as we do below, is no good.) This should probably 2957 be a new gdbarch method or something. */ 2958 static CORE_ADDR 2959 svr4_truncate_ptr (CORE_ADDR addr) 2960 { 2961 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8) 2962 /* We don't need to truncate anything, and the bit twiddling below 2963 will fail due to overflow problems. */ 2964 return addr; 2965 else 2966 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1); 2967 } 2968 2969 2970 static void 2971 svr4_relocate_section_addresses (struct so_list *so, 2972 struct target_section *sec) 2973 { 2974 bfd *abfd = sec->the_bfd_section->owner; 2975 2976 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd)); 2977 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd)); 2978 } 2979 2980 2981 /* Architecture-specific operations. */ 2982 2983 /* Per-architecture data key. */ 2984 static struct gdbarch_data *solib_svr4_data; 2985 2986 struct solib_svr4_ops 2987 { 2988 /* Return a description of the layout of `struct link_map'. */ 2989 struct link_map_offsets *(*fetch_link_map_offsets)(void); 2990 }; 2991 2992 /* Return a default for the architecture-specific operations. */ 2993 2994 static void * 2995 solib_svr4_init (struct obstack *obstack) 2996 { 2997 struct solib_svr4_ops *ops; 2998 2999 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops); 3000 ops->fetch_link_map_offsets = NULL; 3001 return ops; 3002 } 3003 3004 /* Set the architecture-specific `struct link_map_offsets' fetcher for 3005 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */ 3006 3007 void 3008 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch, 3009 struct link_map_offsets *(*flmo) (void)) 3010 { 3011 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data); 3012 3013 ops->fetch_link_map_offsets = flmo; 3014 3015 set_solib_ops (gdbarch, &svr4_so_ops); 3016 } 3017 3018 /* Fetch a link_map_offsets structure using the architecture-specific 3019 `struct link_map_offsets' fetcher. 
*/

static struct link_map_offsets *
svr4_fetch_link_map_offsets (void)
{
  struct solib_svr4_ops *ops
    = gdbarch_data (target_gdbarch (), solib_svr4_data);

  gdb_assert (ops->fetch_link_map_offsets);
  return ops->fetch_link_map_offsets ();
}

/* Return 1 if a link map offset fetcher has been defined, 0 otherwise.  */

static int
svr4_have_link_map_offsets (void)
{
  struct solib_svr4_ops *ops
    = gdbarch_data (target_gdbarch (), solib_svr4_data);

  return (ops->fetch_link_map_offsets != NULL);
}


/* Most OSes that have SVR4-style ELF dynamic libraries define a
   `struct r_debug' and a `struct link_map' that are binary compatible
   with the original SVR4 implementation.  */
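
/* As a point of reference (an illustrative sketch only; GDB reads the
   inferior's structures through the offset tables below, never through
   a host header), the ILP32 layout assumed by
   svr4_ilp32_fetch_link_map_offsets corresponds to the classic
   <link.h> declaration:

       struct link_map
       {
         ElfW(Addr) l_addr;         // Offset  0: load displacement.
         char *l_name;              // Offset  4: file name.
         ElfW(Dyn) *l_ld;           // Offset  8: dynamic section.
         struct link_map *l_next;   // Offset 12.
         struct link_map *l_prev;   // Offset 16.
       };                           // 20 bytes in total.

   The LP64 variant simply doubles the size of the pointer-sized
   fields.  */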

/* Fetch (and possibly build) an appropriate `struct link_map_offsets'
   for an ILP32 SVR4 system.  */

struct link_map_offsets *
svr4_ilp32_fetch_link_map_offsets (void)
{
  static struct link_map_offsets lmo;
  static struct link_map_offsets *lmp = NULL;

  if (lmp == NULL)
    {
      lmp = &lmo;

      lmo.r_version_offset = 0;
      lmo.r_version_size = 4;
      lmo.r_map_offset = 4;
      lmo.r_brk_offset = 8;
      lmo.r_ldsomap_offset = 20;

      /* Everything we need is in the first 20 bytes.  */
      lmo.link_map_size = 20;
      lmo.l_addr_offset = 0;
      lmo.l_name_offset = 4;
      lmo.l_ld_offset = 8;
      lmo.l_next_offset = 12;
      lmo.l_prev_offset = 16;
    }

  return lmp;
}

/* Fetch (and possibly build) an appropriate `struct link_map_offsets'
   for an LP64 SVR4 system.  */

struct link_map_offsets *
svr4_lp64_fetch_link_map_offsets (void)
{
  static struct link_map_offsets lmo;
  static struct link_map_offsets *lmp = NULL;

  if (lmp == NULL)
    {
      lmp = &lmo;

      lmo.r_version_offset = 0;
      lmo.r_version_size = 4;
      lmo.r_map_offset = 8;
      lmo.r_brk_offset = 16;
      lmo.r_ldsomap_offset = 40;

      /* Everything we need is in the first 40 bytes.  */
      lmo.link_map_size = 40;
      lmo.l_addr_offset = 0;
      lmo.l_name_offset = 8;
      lmo.l_ld_offset = 16;
      lmo.l_next_offset = 24;
      lmo.l_prev_offset = 32;
    }

  return lmp;
}


struct target_so_ops svr4_so_ops;

/* Lookup global symbol for ELF DSOs linked with -Bsymbolic.  Those DSOs
   have a different rule for symbol lookup.  The lookup begins here in
   the DSO, not in the main executable.  */

static struct symbol *
elf_lookup_lib_symbol (const struct objfile *objfile,
                       const char *name,
                       const domain_enum domain)
{
  bfd *abfd;

  if (objfile == symfile_objfile)
    abfd = exec_bfd;
  else
    {
      /* OBJFILE should have been passed as the non-debug one.  */
      gdb_assert (objfile->separate_debug_objfile_backlink == NULL);

      abfd = objfile->obfd;
    }

  if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
    return NULL;

  return lookup_global_symbol_from_objfile (objfile, name, domain);
}

extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */

void
_initialize_svr4_solib (void)
{
  solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
  solib_svr4_pspace_data
    = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);

  svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
  svr4_so_ops.free_so = svr4_free_so;
  svr4_so_ops.clear_so = svr4_clear_so;
  svr4_so_ops.clear_solib = svr4_clear_solib;
  svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
  svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
  svr4_so_ops.current_sos = svr4_current_sos;
  svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
  svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
  svr4_so_ops.bfd_open = solib_bfd_open;
  svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
  svr4_so_ops.same = svr4_same;
  svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
  svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
  svr4_so_ops.handle_event = svr4_handle_solib_event;
}