//===- SyntheticSections.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains linker-synthesized sections. Currently, synthetic
// sections are created as either output sections or input sections, but we
// are rewriting code so that all synthetic sections are created as input
// sections.
//
//===----------------------------------------------------------------------===//

#include "SyntheticSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Target.h"
#include "Writer.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "lld/Common/Threads.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugPubTable.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MD5.h"
#include <cstdlib>
#include <thread>

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::ELF;
using namespace llvm::object;
using namespace llvm::support;

using namespace lld;
using namespace lld::elf;

using llvm::support::endian::read32le;
using llvm::support::endian::write32le;
using llvm::support::endian::write64le;

constexpr size_t MergeNoTailSection::numShards;

static uint64_t readUint(uint8_t *buf) {
  return config->is64 ? read64(buf) : read32(buf);
}

static void writeUint(uint8_t *buf, uint64_t val) {
  if (config->is64)
    write64(buf, val);
  else
    write32(buf, val);
}

// Returns an LLD version string.
static ArrayRef<uint8_t> getVersion() {
  // Check LLD_VERSION first for ease of testing.
  // You can get consistent output by using the environment variable.
  // This is only for testing.
  StringRef s = getenv("LLD_VERSION");
  if (s.empty())
    s = saver.save(Twine("Linker: ") + getLLDVersion());

  // +1 to include the terminating '\0'.
  return {(const uint8_t *)s.data(), s.size() + 1};
}

// Creates a .comment section containing LLD version info.
// With this feature, you can identify LLD-generated binaries easily
// by "readelf --string-dump .comment <file>".
// The returned object is a mergeable string section.
MergeInputSection *elf::createCommentSection() {
  return make<MergeInputSection>(SHF_MERGE | SHF_STRINGS, SHT_PROGBITS, 1,
                                 getVersion(), ".comment");
}

// .MIPS.abiflags section.
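// The output .MIPS.abiflags is a single Elf_Mips_ABIFlags record folded from
// all inputs: numeric fields (ISA level/revision/extension, register sizes)
// take the maximum across input records, bit-mask fields (ASEs, flags1/flags2)
// are OR-ed together, and the FP ABI is reconciled via getMipsFpAbiFlag();
// see create() below.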
template <class ELFT>
MipsAbiFlagsSection<ELFT>::MipsAbiFlagsSection(Elf_Mips_ABIFlags flags)
    : SyntheticSection(SHF_ALLOC, SHT_MIPS_ABIFLAGS, 8, ".MIPS.abiflags"),
      flags(flags) {
  this->entsize = sizeof(Elf_Mips_ABIFlags);
}

template <class ELFT> void MipsAbiFlagsSection<ELFT>::writeTo(uint8_t *buf) {
  memcpy(buf, &flags, sizeof(flags));
}

template <class ELFT>
MipsAbiFlagsSection<ELFT> *MipsAbiFlagsSection<ELFT>::create() {
  Elf_Mips_ABIFlags flags = {};
  bool create = false;

  for (InputSectionBase *sec : inputSections) {
    if (sec->type != SHT_MIPS_ABIFLAGS)
      continue;
    sec->markDead();
    create = true;

    std::string filename = toString(sec->file);
    const size_t size = sec->data().size();
    // Older versions of BFD (such as the default FreeBSD linker) concatenate
    // .MIPS.abiflags instead of merging. To allow for this case (or potential
    // zero padding) we ignore everything after the first Elf_Mips_ABIFlags.
    if (size < sizeof(Elf_Mips_ABIFlags)) {
      error(filename + ": invalid size of .MIPS.abiflags section: got " +
            Twine(size) + " instead of " + Twine(sizeof(Elf_Mips_ABIFlags)));
      return nullptr;
    }
    auto *s = reinterpret_cast<const Elf_Mips_ABIFlags *>(sec->data().data());
    if (s->version != 0) {
      error(filename + ": unexpected .MIPS.abiflags version " +
            Twine(s->version));
      return nullptr;
    }

    // LLD checks ISA compatibility in calcMipsEFlags(). Here we just
    // select the highest number of ISA/Rev/Ext.
    flags.isa_level = std::max(flags.isa_level, s->isa_level);
    flags.isa_rev = std::max(flags.isa_rev, s->isa_rev);
    flags.isa_ext = std::max(flags.isa_ext, s->isa_ext);
    flags.gpr_size = std::max(flags.gpr_size, s->gpr_size);
    flags.cpr1_size = std::max(flags.cpr1_size, s->cpr1_size);
    flags.cpr2_size = std::max(flags.cpr2_size, s->cpr2_size);
    flags.ases |= s->ases;
    flags.flags1 |= s->flags1;
    flags.flags2 |= s->flags2;
    flags.fp_abi = elf::getMipsFpAbiFlag(flags.fp_abi, s->fp_abi, filename);
  }

  if (create)
    return make<MipsAbiFlagsSection<ELFT>>(flags);
  return nullptr;
}

// .MIPS.options section.
template <class ELFT>
MipsOptionsSection<ELFT>::MipsOptionsSection(Elf_Mips_RegInfo reginfo)
    : SyntheticSection(SHF_ALLOC, SHT_MIPS_OPTIONS, 8, ".MIPS.options"),
      reginfo(reginfo) {
  this->entsize = sizeof(Elf_Mips_Options) + sizeof(Elf_Mips_RegInfo);
}

template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *buf) {
  auto *options = reinterpret_cast<Elf_Mips_Options *>(buf);
  options->kind = ODK_REGINFO;
  options->size = getSize();

  if (!config->relocatable)
    reginfo.ri_gp_value = in.mipsGot->getGp();
  memcpy(buf + sizeof(Elf_Mips_Options), &reginfo, sizeof(reginfo));
}

template <class ELFT>
MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
  // N64 ABI only.
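  // (O32 and N32 objects provide the corresponding register/GP information in
  // the .reginfo section instead; see MipsReginfoSection<ELFT>::create()
  // below.)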
  if (!ELFT::Is64Bits)
    return nullptr;

  std::vector<InputSectionBase *> sections;
  for (InputSectionBase *sec : inputSections)
    if (sec->type == SHT_MIPS_OPTIONS)
      sections.push_back(sec);

  if (sections.empty())
    return nullptr;

  Elf_Mips_RegInfo reginfo = {};
  for (InputSectionBase *sec : sections) {
    sec->markDead();

    std::string filename = toString(sec->file);
    ArrayRef<uint8_t> d = sec->data();

    while (!d.empty()) {
      if (d.size() < sizeof(Elf_Mips_Options)) {
        error(filename + ": invalid size of .MIPS.options section");
        break;
      }

      auto *opt = reinterpret_cast<const Elf_Mips_Options *>(d.data());
      if (opt->kind == ODK_REGINFO) {
        reginfo.ri_gprmask |= opt->getRegInfo().ri_gprmask;
        sec->getFile<ELFT>()->mipsGp0 = opt->getRegInfo().ri_gp_value;
        break;
      }

      if (!opt->size)
        fatal(filename + ": zero option descriptor size");
      d = d.slice(opt->size);
    }
  }

  return make<MipsOptionsSection<ELFT>>(reginfo);
}

// MIPS .reginfo section.
template <class ELFT>
MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo reginfo)
    : SyntheticSection(SHF_ALLOC, SHT_MIPS_REGINFO, 4, ".reginfo"),
      reginfo(reginfo) {
  this->entsize = sizeof(Elf_Mips_RegInfo);
}

template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *buf) {
  if (!config->relocatable)
    reginfo.ri_gp_value = in.mipsGot->getGp();
  memcpy(buf, &reginfo, sizeof(reginfo));
}

template <class ELFT>
MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
  // Section should be alive for O32 and N32 ABIs only.
  if (ELFT::Is64Bits)
    return nullptr;

  std::vector<InputSectionBase *> sections;
  for (InputSectionBase *sec : inputSections)
    if (sec->type == SHT_MIPS_REGINFO)
      sections.push_back(sec);

  if (sections.empty())
    return nullptr;

  Elf_Mips_RegInfo reginfo = {};
  for (InputSectionBase *sec : sections) {
    sec->markDead();

    if (sec->data().size() != sizeof(Elf_Mips_RegInfo)) {
      error(toString(sec->file) + ": invalid size of .reginfo section");
      return nullptr;
    }

    auto *r = reinterpret_cast<const Elf_Mips_RegInfo *>(sec->data().data());
    reginfo.ri_gprmask |= r->ri_gprmask;
    sec->getFile<ELFT>()->mipsGp0 = r->ri_gp_value;
  }

  return make<MipsReginfoSection<ELFT>>(reginfo);
}

InputSection *elf::createInterpSection() {
  // StringSaver guarantees that the returned string ends with '\0'.
257 StringRef s = saver.save(config->dynamicLinker); 258 ArrayRef<uint8_t> contents = {(const uint8_t *)s.data(), s.size() + 1}; 259 260 auto *sec = make<InputSection>(nullptr, SHF_ALLOC, SHT_PROGBITS, 1, contents, 261 ".interp"); 262 sec->markLive(); 263 return sec; 264 } 265 266 Defined *elf::addSyntheticLocal(StringRef name, uint8_t type, uint64_t value, 267 uint64_t size, InputSectionBase §ion) { 268 auto *s = make<Defined>(section.file, name, STB_LOCAL, STV_DEFAULT, type, 269 value, size, §ion); 270 if (in.symTab) 271 in.symTab->addSymbol(s); 272 return s; 273 } 274 275 static size_t getHashSize() { 276 switch (config->buildId) { 277 case BuildIdKind::Fast: 278 return 8; 279 case BuildIdKind::Md5: 280 case BuildIdKind::Uuid: 281 return 16; 282 case BuildIdKind::Sha1: 283 return 20; 284 case BuildIdKind::Hexstring: 285 return config->buildIdVector.size(); 286 default: 287 llvm_unreachable("unknown BuildIdKind"); 288 } 289 } 290 291 // This class represents a linker-synthesized .note.gnu.property section. 292 // 293 // In x86 and AArch64, object files may contain feature flags indicating the 294 // features that they have used. The flags are stored in a .note.gnu.property 295 // section. 296 // 297 // lld reads the sections from input files and merges them by computing AND of 298 // the flags. The result is written as a new .note.gnu.property section. 299 // 300 // If the flag is zero (which indicates that the intersection of the feature 301 // sets is empty, or some input files didn't have .note.gnu.property sections), 302 // we don't create this section. 303 GnuPropertySection::GnuPropertySection() 304 : SyntheticSection(llvm::ELF::SHF_ALLOC, llvm::ELF::SHT_NOTE, 4, 305 ".note.gnu.property") {} 306 307 void GnuPropertySection::writeTo(uint8_t *buf) { 308 uint32_t featureAndType = config->emachine == EM_AARCH64 309 ? GNU_PROPERTY_AARCH64_FEATURE_1_AND 310 : GNU_PROPERTY_X86_FEATURE_1_AND; 311 312 write32(buf, 4); // Name size 313 write32(buf + 4, config->is64 ? 16 : 12); // Content size 314 write32(buf + 8, NT_GNU_PROPERTY_TYPE_0); // Type 315 memcpy(buf + 12, "GNU", 4); // Name string 316 write32(buf + 16, featureAndType); // Feature type 317 write32(buf + 20, 4); // Feature size 318 write32(buf + 24, config->andFeatures); // Feature flags 319 if (config->is64) 320 write32(buf + 28, 0); // Padding 321 } 322 323 size_t GnuPropertySection::getSize() const { return config->is64 ? 32 : 28; } 324 325 BuildIdSection::BuildIdSection() 326 : SyntheticSection(SHF_ALLOC, SHT_NOTE, 4, ".note.gnu.build-id"), 327 hashSize(getHashSize()) {} 328 329 void BuildIdSection::writeTo(uint8_t *buf) { 330 write32(buf, 4); // Name size 331 write32(buf + 4, hashSize); // Content size 332 write32(buf + 8, NT_GNU_BUILD_ID); // Type 333 memcpy(buf + 12, "GNU", 4); // Name string 334 hashBuf = buf + 16; 335 } 336 337 void BuildIdSection::writeBuildId(ArrayRef<uint8_t> buf) { 338 assert(buf.size() == hashSize); 339 memcpy(hashBuf, buf.data(), hashSize); 340 } 341 342 BssSection::BssSection(StringRef name, uint64_t size, uint32_t alignment) 343 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, alignment, name) { 344 this->bss = true; 345 this->size = size; 346 } 347 348 EhFrameSection::EhFrameSection() 349 : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame") {} 350 351 // Search for an existing CIE record or create a new one. 352 // CIE records from input object files are uniquified by their contents 353 // and where their relocations point to. 
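// Concretely, the lookup key is the pair (raw CIE bytes, personality symbol
// targeted by the CIE's first relocation), so byte-identical CIEs that
// reference different personality routines remain distinct CieRecords.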
354 template <class ELFT, class RelTy> 355 CieRecord *EhFrameSection::addCie(EhSectionPiece &cie, ArrayRef<RelTy> rels) { 356 Symbol *personality = nullptr; 357 unsigned firstRelI = cie.firstRelocation; 358 if (firstRelI != (unsigned)-1) 359 personality = 360 &cie.sec->template getFile<ELFT>()->getRelocTargetSym(rels[firstRelI]); 361 362 // Search for an existing CIE by CIE contents/relocation target pair. 363 CieRecord *&rec = cieMap[{cie.data(), personality}]; 364 365 // If not found, create a new one. 366 if (!rec) { 367 rec = make<CieRecord>(); 368 rec->cie = &cie; 369 cieRecords.push_back(rec); 370 } 371 return rec; 372 } 373 374 // There is one FDE per function. Returns true if a given FDE 375 // points to a live function. 376 template <class ELFT, class RelTy> 377 bool EhFrameSection::isFdeLive(EhSectionPiece &fde, ArrayRef<RelTy> rels) { 378 auto *sec = cast<EhInputSection>(fde.sec); 379 unsigned firstRelI = fde.firstRelocation; 380 381 // An FDE should point to some function because FDEs are to describe 382 // functions. That's however not always the case due to an issue of 383 // ld.gold with -r. ld.gold may discard only functions and leave their 384 // corresponding FDEs, which results in creating bad .eh_frame sections. 385 // To deal with that, we ignore such FDEs. 386 if (firstRelI == (unsigned)-1) 387 return false; 388 389 const RelTy &rel = rels[firstRelI]; 390 Symbol &b = sec->template getFile<ELFT>()->getRelocTargetSym(rel); 391 392 // FDEs for garbage-collected or merged-by-ICF sections, or sections in 393 // another partition, are dead. 394 if (auto *d = dyn_cast<Defined>(&b)) 395 if (SectionBase *sec = d->section) 396 return sec->partition == partition; 397 return false; 398 } 399 400 // .eh_frame is a sequence of CIE or FDE records. In general, there 401 // is one CIE record per input object file which is followed by 402 // a list of FDEs. This function searches an existing CIE or create a new 403 // one and associates FDEs to the CIE. 404 template <class ELFT, class RelTy> 405 void EhFrameSection::addSectionAux(EhInputSection *sec, ArrayRef<RelTy> rels) { 406 offsetToCie.clear(); 407 for (EhSectionPiece &piece : sec->pieces) { 408 // The empty record is the end marker. 409 if (piece.size == 4) 410 return; 411 412 size_t offset = piece.inputOff; 413 uint32_t id = read32(piece.data().data() + 4); 414 if (id == 0) { 415 offsetToCie[offset] = addCie<ELFT>(piece, rels); 416 continue; 417 } 418 419 uint32_t cieOffset = offset + 4 - id; 420 CieRecord *rec = offsetToCie[cieOffset]; 421 if (!rec) 422 fatal(toString(sec) + ": invalid CIE reference"); 423 424 if (!isFdeLive<ELFT>(piece, rels)) 425 continue; 426 rec->fdes.push_back(&piece); 427 numFdes++; 428 } 429 } 430 431 template <class ELFT> void EhFrameSection::addSection(InputSectionBase *c) { 432 auto *sec = cast<EhInputSection>(c); 433 sec->parent = this; 434 435 alignment = std::max(alignment, sec->alignment); 436 sections.push_back(sec); 437 438 for (auto *ds : sec->dependentSections) 439 dependentSections.push_back(ds); 440 441 if (sec->pieces.empty()) 442 return; 443 444 if (sec->areRelocsRela) 445 addSectionAux<ELFT>(sec, sec->template relas<ELFT>()); 446 else 447 addSectionAux<ELFT>(sec, sec->template rels<ELFT>()); 448 } 449 450 static void writeCieFde(uint8_t *buf, ArrayRef<uint8_t> d) { 451 memcpy(buf, d.data(), d.size()); 452 453 size_t aligned = alignTo(d.size(), config->wordsize); 454 455 // Zero-clear trailing padding if it exists. 
456 memset(buf + d.size(), 0, aligned - d.size()); 457 458 // Fix the size field. -4 since size does not include the size field itself. 459 write32(buf, aligned - 4); 460 } 461 462 void EhFrameSection::finalizeContents() { 463 assert(!this->size); // Not finalized. 464 size_t off = 0; 465 for (CieRecord *rec : cieRecords) { 466 rec->cie->outputOff = off; 467 off += alignTo(rec->cie->size, config->wordsize); 468 469 for (EhSectionPiece *fde : rec->fdes) { 470 fde->outputOff = off; 471 off += alignTo(fde->size, config->wordsize); 472 } 473 } 474 475 // The LSB standard does not allow a .eh_frame section with zero 476 // Call Frame Information records. glibc unwind-dw2-fde.c 477 // classify_object_over_fdes expects there is a CIE record length 0 as a 478 // terminator. Thus we add one unconditionally. 479 off += 4; 480 481 this->size = off; 482 } 483 484 // Returns data for .eh_frame_hdr. .eh_frame_hdr is a binary search table 485 // to get an FDE from an address to which FDE is applied. This function 486 // returns a list of such pairs. 487 std::vector<EhFrameSection::FdeData> EhFrameSection::getFdeData() const { 488 uint8_t *buf = Out::bufferStart + getParent()->offset + outSecOff; 489 std::vector<FdeData> ret; 490 491 uint64_t va = getPartition().ehFrameHdr->getVA(); 492 for (CieRecord *rec : cieRecords) { 493 uint8_t enc = getFdeEncoding(rec->cie); 494 for (EhSectionPiece *fde : rec->fdes) { 495 uint64_t pc = getFdePc(buf, fde->outputOff, enc); 496 uint64_t fdeVA = getParent()->addr + fde->outputOff; 497 if (!isInt<32>(pc - va)) 498 fatal(toString(fde->sec) + ": PC offset is too large: 0x" + 499 Twine::utohexstr(pc - va)); 500 ret.push_back({uint32_t(pc - va), uint32_t(fdeVA - va)}); 501 } 502 } 503 504 // Sort the FDE list by their PC and uniqueify. Usually there is only 505 // one FDE for a PC (i.e. function), but if ICF merges two functions 506 // into one, there can be more than one FDEs pointing to the address. 507 auto less = [](const FdeData &a, const FdeData &b) { 508 return a.pcRel < b.pcRel; 509 }; 510 llvm::stable_sort(ret, less); 511 auto eq = [](const FdeData &a, const FdeData &b) { 512 return a.pcRel == b.pcRel; 513 }; 514 ret.erase(std::unique(ret.begin(), ret.end(), eq), ret.end()); 515 516 return ret; 517 } 518 519 static uint64_t readFdeAddr(uint8_t *buf, int size) { 520 switch (size) { 521 case DW_EH_PE_udata2: 522 return read16(buf); 523 case DW_EH_PE_sdata2: 524 return (int16_t)read16(buf); 525 case DW_EH_PE_udata4: 526 return read32(buf); 527 case DW_EH_PE_sdata4: 528 return (int32_t)read32(buf); 529 case DW_EH_PE_udata8: 530 case DW_EH_PE_sdata8: 531 return read64(buf); 532 case DW_EH_PE_absptr: 533 return readUint(buf); 534 } 535 fatal("unknown FDE size encoding"); 536 } 537 538 // Returns the VA to which a given FDE (on a mmap'ed buffer) is applied to. 539 // We need it to create .eh_frame_hdr section. 540 uint64_t EhFrameSection::getFdePc(uint8_t *buf, size_t fdeOff, 541 uint8_t enc) const { 542 // The starting address to which this FDE applies is 543 // stored at FDE + 8 byte. 544 size_t off = fdeOff + 8; 545 uint64_t addr = readFdeAddr(buf + off, enc & 0xf); 546 if ((enc & 0x70) == DW_EH_PE_absptr) 547 return addr; 548 if ((enc & 0x70) == DW_EH_PE_pcrel) 549 return addr + getParent()->addr + off; 550 fatal("unknown FDE size relative encoding"); 551 } 552 553 void EhFrameSection::writeTo(uint8_t *buf) { 554 // Write CIE and FDE records. 
555 for (CieRecord *rec : cieRecords) { 556 size_t cieOffset = rec->cie->outputOff; 557 writeCieFde(buf + cieOffset, rec->cie->data()); 558 559 for (EhSectionPiece *fde : rec->fdes) { 560 size_t off = fde->outputOff; 561 writeCieFde(buf + off, fde->data()); 562 563 // FDE's second word should have the offset to an associated CIE. 564 // Write it. 565 write32(buf + off + 4, off + 4 - cieOffset); 566 } 567 } 568 569 // Apply relocations. .eh_frame section contents are not contiguous 570 // in the output buffer, but relocateAlloc() still works because 571 // getOffset() takes care of discontiguous section pieces. 572 for (EhInputSection *s : sections) 573 s->relocateAlloc(buf, nullptr); 574 575 if (getPartition().ehFrameHdr && getPartition().ehFrameHdr->getParent()) 576 getPartition().ehFrameHdr->write(); 577 } 578 579 GotSection::GotSection() 580 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, config->wordsize, 581 ".got") { 582 // If ElfSym::globalOffsetTable is relative to .got and is referenced, 583 // increase numEntries by the number of entries used to emit 584 // ElfSym::globalOffsetTable. 585 if (ElfSym::globalOffsetTable && !target->gotBaseSymInGotPlt) 586 numEntries += target->gotHeaderEntriesNum; 587 } 588 589 void GotSection::addEntry(Symbol &sym) { 590 sym.gotIndex = numEntries; 591 ++numEntries; 592 } 593 594 bool GotSection::addDynTlsEntry(Symbol &sym) { 595 if (sym.globalDynIndex != -1U) 596 return false; 597 sym.globalDynIndex = numEntries; 598 // Global Dynamic TLS entries take two GOT slots. 599 numEntries += 2; 600 return true; 601 } 602 603 // Reserves TLS entries for a TLS module ID and a TLS block offset. 604 // In total it takes two GOT slots. 605 bool GotSection::addTlsIndex() { 606 if (tlsIndexOff != uint32_t(-1)) 607 return false; 608 tlsIndexOff = numEntries * config->wordsize; 609 numEntries += 2; 610 return true; 611 } 612 613 uint64_t GotSection::getGlobalDynAddr(const Symbol &b) const { 614 return this->getVA() + b.globalDynIndex * config->wordsize; 615 } 616 617 uint64_t GotSection::getGlobalDynOffset(const Symbol &b) const { 618 return b.globalDynIndex * config->wordsize; 619 } 620 621 void GotSection::finalizeContents() { 622 size = numEntries * config->wordsize; 623 } 624 625 bool GotSection::isNeeded() const { 626 // We need to emit a GOT even if it's empty if there's a relocation that is 627 // relative to GOT(such as GOTOFFREL). 628 return numEntries || hasGotOffRel; 629 } 630 631 void GotSection::writeTo(uint8_t *buf) { 632 // Buf points to the start of this section's buffer, 633 // whereas InputSectionBase::relocateAlloc() expects its argument 634 // to point to the start of the output section. 
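  // Passing buf - outSecOff below compensates for that difference.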
635 target->writeGotHeader(buf); 636 relocateAlloc(buf - outSecOff, buf - outSecOff + size); 637 } 638 639 static uint64_t getMipsPageAddr(uint64_t addr) { 640 return (addr + 0x8000) & ~0xffff; 641 } 642 643 static uint64_t getMipsPageCount(uint64_t size) { 644 return (size + 0xfffe) / 0xffff + 1; 645 } 646 647 MipsGotSection::MipsGotSection() 648 : SyntheticSection(SHF_ALLOC | SHF_WRITE | SHF_MIPS_GPREL, SHT_PROGBITS, 16, 649 ".got") {} 650 651 void MipsGotSection::addEntry(InputFile &file, Symbol &sym, int64_t addend, 652 RelExpr expr) { 653 FileGot &g = getGot(file); 654 if (expr == R_MIPS_GOT_LOCAL_PAGE) { 655 if (const OutputSection *os = sym.getOutputSection()) 656 g.pagesMap.insert({os, {}}); 657 else 658 g.local16.insert({{nullptr, getMipsPageAddr(sym.getVA(addend))}, 0}); 659 } else if (sym.isTls()) 660 g.tls.insert({&sym, 0}); 661 else if (sym.isPreemptible && expr == R_ABS) 662 g.relocs.insert({&sym, 0}); 663 else if (sym.isPreemptible) 664 g.global.insert({&sym, 0}); 665 else if (expr == R_MIPS_GOT_OFF32) 666 g.local32.insert({{&sym, addend}, 0}); 667 else 668 g.local16.insert({{&sym, addend}, 0}); 669 } 670 671 void MipsGotSection::addDynTlsEntry(InputFile &file, Symbol &sym) { 672 getGot(file).dynTlsSymbols.insert({&sym, 0}); 673 } 674 675 void MipsGotSection::addTlsIndex(InputFile &file) { 676 getGot(file).dynTlsSymbols.insert({nullptr, 0}); 677 } 678 679 size_t MipsGotSection::FileGot::getEntriesNum() const { 680 return getPageEntriesNum() + local16.size() + global.size() + relocs.size() + 681 tls.size() + dynTlsSymbols.size() * 2; 682 } 683 684 size_t MipsGotSection::FileGot::getPageEntriesNum() const { 685 size_t num = 0; 686 for (const std::pair<const OutputSection *, FileGot::PageBlock> &p : pagesMap) 687 num += p.second.count; 688 return num; 689 } 690 691 size_t MipsGotSection::FileGot::getIndexedEntriesNum() const { 692 size_t count = getPageEntriesNum() + local16.size() + global.size(); 693 // If there are relocation-only entries in the GOT, TLS entries 694 // are allocated after them. TLS entries should be addressable 695 // by 16-bit index so count both reloc-only and TLS entries. 
696 if (!tls.empty() || !dynTlsSymbols.empty()) 697 count += relocs.size() + tls.size() + dynTlsSymbols.size() * 2; 698 return count; 699 } 700 701 MipsGotSection::FileGot &MipsGotSection::getGot(InputFile &f) { 702 if (!f.mipsGotIndex.hasValue()) { 703 gots.emplace_back(); 704 gots.back().file = &f; 705 f.mipsGotIndex = gots.size() - 1; 706 } 707 return gots[*f.mipsGotIndex]; 708 } 709 710 uint64_t MipsGotSection::getPageEntryOffset(const InputFile *f, 711 const Symbol &sym, 712 int64_t addend) const { 713 const FileGot &g = gots[*f->mipsGotIndex]; 714 uint64_t index = 0; 715 if (const OutputSection *outSec = sym.getOutputSection()) { 716 uint64_t secAddr = getMipsPageAddr(outSec->addr); 717 uint64_t symAddr = getMipsPageAddr(sym.getVA(addend)); 718 index = g.pagesMap.lookup(outSec).firstIndex + (symAddr - secAddr) / 0xffff; 719 } else { 720 index = g.local16.lookup({nullptr, getMipsPageAddr(sym.getVA(addend))}); 721 } 722 return index * config->wordsize; 723 } 724 725 uint64_t MipsGotSection::getSymEntryOffset(const InputFile *f, const Symbol &s, 726 int64_t addend) const { 727 const FileGot &g = gots[*f->mipsGotIndex]; 728 Symbol *sym = const_cast<Symbol *>(&s); 729 if (sym->isTls()) 730 return g.tls.lookup(sym) * config->wordsize; 731 if (sym->isPreemptible) 732 return g.global.lookup(sym) * config->wordsize; 733 return g.local16.lookup({sym, addend}) * config->wordsize; 734 } 735 736 uint64_t MipsGotSection::getTlsIndexOffset(const InputFile *f) const { 737 const FileGot &g = gots[*f->mipsGotIndex]; 738 return g.dynTlsSymbols.lookup(nullptr) * config->wordsize; 739 } 740 741 uint64_t MipsGotSection::getGlobalDynOffset(const InputFile *f, 742 const Symbol &s) const { 743 const FileGot &g = gots[*f->mipsGotIndex]; 744 Symbol *sym = const_cast<Symbol *>(&s); 745 return g.dynTlsSymbols.lookup(sym) * config->wordsize; 746 } 747 748 const Symbol *MipsGotSection::getFirstGlobalEntry() const { 749 if (gots.empty()) 750 return nullptr; 751 const FileGot &primGot = gots.front(); 752 if (!primGot.global.empty()) 753 return primGot.global.front().first; 754 if (!primGot.relocs.empty()) 755 return primGot.relocs.front().first; 756 return nullptr; 757 } 758 759 unsigned MipsGotSection::getLocalEntriesNum() const { 760 if (gots.empty()) 761 return headerEntriesNum; 762 return headerEntriesNum + gots.front().getPageEntriesNum() + 763 gots.front().local16.size(); 764 } 765 766 bool MipsGotSection::tryMergeGots(FileGot &dst, FileGot &src, bool isPrimary) { 767 FileGot tmp = dst; 768 set_union(tmp.pagesMap, src.pagesMap); 769 set_union(tmp.local16, src.local16); 770 set_union(tmp.global, src.global); 771 set_union(tmp.relocs, src.relocs); 772 set_union(tmp.tls, src.tls); 773 set_union(tmp.dynTlsSymbols, src.dynTlsSymbols); 774 775 size_t count = isPrimary ? headerEntriesNum : 0; 776 count += tmp.getIndexedEntriesNum(); 777 778 if (count * config->wordsize > config->mipsGotSize) 779 return false; 780 781 std::swap(tmp, dst); 782 return true; 783 } 784 785 void MipsGotSection::finalizeContents() { updateAllocSize(); } 786 787 bool MipsGotSection::updateAllocSize() { 788 size = headerEntriesNum * config->wordsize; 789 for (const FileGot &g : gots) 790 size += g.getEntriesNum() * config->wordsize; 791 return false; 792 } 793 794 void MipsGotSection::build() { 795 if (gots.empty()) 796 return; 797 798 std::vector<FileGot> mergedGots(1); 799 800 // For each GOT move non-preemptible symbols from the `Global` 801 // to `Local16` list. 
Preemptible symbol might become non-preemptible 802 // one if, for example, it gets a related copy relocation. 803 for (FileGot &got : gots) { 804 for (auto &p: got.global) 805 if (!p.first->isPreemptible) 806 got.local16.insert({{p.first, 0}, 0}); 807 got.global.remove_if([&](const std::pair<Symbol *, size_t> &p) { 808 return !p.first->isPreemptible; 809 }); 810 } 811 812 // For each GOT remove "reloc-only" entry if there is "global" 813 // entry for the same symbol. And add local entries which indexed 814 // using 32-bit value at the end of 16-bit entries. 815 for (FileGot &got : gots) { 816 got.relocs.remove_if([&](const std::pair<Symbol *, size_t> &p) { 817 return got.global.count(p.first); 818 }); 819 set_union(got.local16, got.local32); 820 got.local32.clear(); 821 } 822 823 // Evaluate number of "reloc-only" entries in the resulting GOT. 824 // To do that put all unique "reloc-only" and "global" entries 825 // from all GOTs to the future primary GOT. 826 FileGot *primGot = &mergedGots.front(); 827 for (FileGot &got : gots) { 828 set_union(primGot->relocs, got.global); 829 set_union(primGot->relocs, got.relocs); 830 got.relocs.clear(); 831 } 832 833 // Evaluate number of "page" entries in each GOT. 834 for (FileGot &got : gots) { 835 for (std::pair<const OutputSection *, FileGot::PageBlock> &p : 836 got.pagesMap) { 837 const OutputSection *os = p.first; 838 uint64_t secSize = 0; 839 for (BaseCommand *cmd : os->sectionCommands) { 840 if (auto *isd = dyn_cast<InputSectionDescription>(cmd)) 841 for (InputSection *isec : isd->sections) { 842 uint64_t off = alignTo(secSize, isec->alignment); 843 secSize = off + isec->getSize(); 844 } 845 } 846 p.second.count = getMipsPageCount(secSize); 847 } 848 } 849 850 // Merge GOTs. Try to join as much as possible GOTs but do not exceed 851 // maximum GOT size. At first, try to fill the primary GOT because 852 // the primary GOT can be accessed in the most effective way. If it 853 // is not possible, try to fill the last GOT in the list, and finally 854 // create a new GOT if both attempts failed. 855 for (FileGot &srcGot : gots) { 856 InputFile *file = srcGot.file; 857 if (tryMergeGots(mergedGots.front(), srcGot, true)) { 858 file->mipsGotIndex = 0; 859 } else { 860 // If this is the first time we failed to merge with the primary GOT, 861 // MergedGots.back() will also be the primary GOT. We must make sure not 862 // to try to merge again with isPrimary=false, as otherwise, if the 863 // inputs are just right, we could allow the primary GOT to become 1 or 2 864 // words bigger due to ignoring the header size. 865 if (mergedGots.size() == 1 || 866 !tryMergeGots(mergedGots.back(), srcGot, false)) { 867 mergedGots.emplace_back(); 868 std::swap(mergedGots.back(), srcGot); 869 } 870 file->mipsGotIndex = mergedGots.size() - 1; 871 } 872 } 873 std::swap(gots, mergedGots); 874 875 // Reduce number of "reloc-only" entries in the primary GOT 876 // by substracting "global" entries exist in the primary GOT. 877 primGot = &gots.front(); 878 primGot->relocs.remove_if([&](const std::pair<Symbol *, size_t> &p) { 879 return primGot->global.count(p.first); 880 }); 881 882 // Calculate indexes for each GOT entry. 883 size_t index = headerEntriesNum; 884 for (FileGot &got : gots) { 885 got.startIndex = &got == primGot ? 
0 : index; 886 for (std::pair<const OutputSection *, FileGot::PageBlock> &p : 887 got.pagesMap) { 888 // For each output section referenced by GOT page relocations calculate 889 // and save into pagesMap an upper bound of MIPS GOT entries required 890 // to store page addresses of local symbols. We assume the worst case - 891 // each 64kb page of the output section has at least one GOT relocation 892 // against it. And take in account the case when the section intersects 893 // page boundaries. 894 p.second.firstIndex = index; 895 index += p.second.count; 896 } 897 for (auto &p: got.local16) 898 p.second = index++; 899 for (auto &p: got.global) 900 p.second = index++; 901 for (auto &p: got.relocs) 902 p.second = index++; 903 for (auto &p: got.tls) 904 p.second = index++; 905 for (auto &p: got.dynTlsSymbols) { 906 p.second = index; 907 index += 2; 908 } 909 } 910 911 // Update Symbol::gotIndex field to use this 912 // value later in the `sortMipsSymbols` function. 913 for (auto &p : primGot->global) 914 p.first->gotIndex = p.second; 915 for (auto &p : primGot->relocs) 916 p.first->gotIndex = p.second; 917 918 // Create dynamic relocations. 919 for (FileGot &got : gots) { 920 // Create dynamic relocations for TLS entries. 921 for (std::pair<Symbol *, size_t> &p : got.tls) { 922 Symbol *s = p.first; 923 uint64_t offset = p.second * config->wordsize; 924 if (s->isPreemptible) 925 mainPart->relaDyn->addReloc(target->tlsGotRel, this, offset, s); 926 } 927 for (std::pair<Symbol *, size_t> &p : got.dynTlsSymbols) { 928 Symbol *s = p.first; 929 uint64_t offset = p.second * config->wordsize; 930 if (s == nullptr) { 931 if (!config->isPic) 932 continue; 933 mainPart->relaDyn->addReloc(target->tlsModuleIndexRel, this, offset, s); 934 } else { 935 // When building a shared library we still need a dynamic relocation 936 // for the module index. Therefore only checking for 937 // S->isPreemptible is not sufficient (this happens e.g. for 938 // thread-locals that have been marked as local through a linker script) 939 if (!s->isPreemptible && !config->isPic) 940 continue; 941 mainPart->relaDyn->addReloc(target->tlsModuleIndexRel, this, offset, s); 942 // However, we can skip writing the TLS offset reloc for non-preemptible 943 // symbols since it is known even in shared libraries 944 if (!s->isPreemptible) 945 continue; 946 offset += config->wordsize; 947 mainPart->relaDyn->addReloc(target->tlsOffsetRel, this, offset, s); 948 } 949 } 950 951 // Do not create dynamic relocations for non-TLS 952 // entries in the primary GOT. 953 if (&got == primGot) 954 continue; 955 956 // Dynamic relocations for "global" entries. 957 for (const std::pair<Symbol *, size_t> &p : got.global) { 958 uint64_t offset = p.second * config->wordsize; 959 mainPart->relaDyn->addReloc(target->relativeRel, this, offset, p.first); 960 } 961 if (!config->isPic) 962 continue; 963 // Dynamic relocations for "local" entries in case of PIC. 
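    // For each page entry we emit target->relativeRel whose value is the
    // containing output section's page address plus n * 0x10000; 16-bit local
    // entries get the same relocation type resolved to the symbol's address
    // plus its addend.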
964 for (const std::pair<const OutputSection *, FileGot::PageBlock> &l : 965 got.pagesMap) { 966 size_t pageCount = l.second.count; 967 for (size_t pi = 0; pi < pageCount; ++pi) { 968 uint64_t offset = (l.second.firstIndex + pi) * config->wordsize; 969 mainPart->relaDyn->addReloc({target->relativeRel, this, offset, l.first, 970 int64_t(pi * 0x10000)}); 971 } 972 } 973 for (const std::pair<GotEntry, size_t> &p : got.local16) { 974 uint64_t offset = p.second * config->wordsize; 975 mainPart->relaDyn->addReloc({target->relativeRel, this, offset, true, 976 p.first.first, p.first.second}); 977 } 978 } 979 } 980 981 bool MipsGotSection::isNeeded() const { 982 // We add the .got section to the result for dynamic MIPS target because 983 // its address and properties are mentioned in the .dynamic section. 984 return !config->relocatable; 985 } 986 987 uint64_t MipsGotSection::getGp(const InputFile *f) const { 988 // For files without related GOT or files refer a primary GOT 989 // returns "common" _gp value. For secondary GOTs calculate 990 // individual _gp values. 991 if (!f || !f->mipsGotIndex.hasValue() || *f->mipsGotIndex == 0) 992 return ElfSym::mipsGp->getVA(0); 993 return getVA() + gots[*f->mipsGotIndex].startIndex * config->wordsize + 994 0x7ff0; 995 } 996 997 void MipsGotSection::writeTo(uint8_t *buf) { 998 // Set the MSB of the second GOT slot. This is not required by any 999 // MIPS ABI documentation, though. 1000 // 1001 // There is a comment in glibc saying that "The MSB of got[1] of a 1002 // gnu object is set to identify gnu objects," and in GNU gold it 1003 // says "the second entry will be used by some runtime loaders". 1004 // But how this field is being used is unclear. 1005 // 1006 // We are not really willing to mimic other linkers behaviors 1007 // without understanding why they do that, but because all files 1008 // generated by GNU tools have this special GOT value, and because 1009 // we've been doing this for years, it is probably a safe bet to 1010 // keep doing this for now. We really need to revisit this to see 1011 // if we had to do this. 1012 writeUint(buf + config->wordsize, (uint64_t)1 << (config->wordsize * 8 - 1)); 1013 for (const FileGot &g : gots) { 1014 auto write = [&](size_t i, const Symbol *s, int64_t a) { 1015 uint64_t va = a; 1016 if (s) 1017 va = s->getVA(a); 1018 writeUint(buf + i * config->wordsize, va); 1019 }; 1020 // Write 'page address' entries to the local part of the GOT. 1021 for (const std::pair<const OutputSection *, FileGot::PageBlock> &l : 1022 g.pagesMap) { 1023 size_t pageCount = l.second.count; 1024 uint64_t firstPageAddr = getMipsPageAddr(l.first->addr); 1025 for (size_t pi = 0; pi < pageCount; ++pi) 1026 write(l.second.firstIndex + pi, nullptr, firstPageAddr + pi * 0x10000); 1027 } 1028 // Local, global, TLS, reloc-only entries. 1029 // If TLS entry has a corresponding dynamic relocations, leave it 1030 // initialized by zero. Write down adjusted TLS symbol's values otherwise. 1031 // To calculate the adjustments use offsets for thread-local storage. 1032 // https://www.linux-mips.org/wiki/NPTL 1033 for (const std::pair<GotEntry, size_t> &p : g.local16) 1034 write(p.second, p.first.first, p.first.second); 1035 // Write VA to the primary GOT only. For secondary GOTs that 1036 // will be done by REL32 dynamic relocations. 
1037 if (&g == &gots.front()) 1038 for (const std::pair<const Symbol *, size_t> &p : g.global) 1039 write(p.second, p.first, 0); 1040 for (const std::pair<Symbol *, size_t> &p : g.relocs) 1041 write(p.second, p.first, 0); 1042 for (const std::pair<Symbol *, size_t> &p : g.tls) 1043 write(p.second, p.first, p.first->isPreemptible ? 0 : -0x7000); 1044 for (const std::pair<Symbol *, size_t> &p : g.dynTlsSymbols) { 1045 if (p.first == nullptr && !config->isPic) 1046 write(p.second, nullptr, 1); 1047 else if (p.first && !p.first->isPreemptible) { 1048 // If we are emitting PIC code with relocations we mustn't write 1049 // anything to the GOT here. When using Elf_Rel relocations the value 1050 // one will be treated as an addend and will cause crashes at runtime 1051 if (!config->isPic) 1052 write(p.second, nullptr, 1); 1053 write(p.second + 1, p.first, -0x8000); 1054 } 1055 } 1056 } 1057 } 1058 1059 // On PowerPC the .plt section is used to hold the table of function addresses 1060 // instead of the .got.plt, and the type is SHT_NOBITS similar to a .bss 1061 // section. I don't know why we have a BSS style type for the section but it is 1062 // consitent across both 64-bit PowerPC ABIs as well as the 32-bit PowerPC ABI. 1063 GotPltSection::GotPltSection() 1064 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, config->wordsize, 1065 ".got.plt") { 1066 if (config->emachine == EM_PPC) { 1067 name = ".plt"; 1068 } else if (config->emachine == EM_PPC64) { 1069 type = SHT_NOBITS; 1070 name = ".plt"; 1071 } 1072 } 1073 1074 void GotPltSection::addEntry(Symbol &sym) { 1075 assert(sym.pltIndex == entries.size()); 1076 entries.push_back(&sym); 1077 } 1078 1079 size_t GotPltSection::getSize() const { 1080 return (target->gotPltHeaderEntriesNum + entries.size()) * config->wordsize; 1081 } 1082 1083 void GotPltSection::writeTo(uint8_t *buf) { 1084 target->writeGotPltHeader(buf); 1085 buf += target->gotPltHeaderEntriesNum * config->wordsize; 1086 for (const Symbol *b : entries) { 1087 target->writeGotPlt(buf, *b); 1088 buf += config->wordsize; 1089 } 1090 } 1091 1092 bool GotPltSection::isNeeded() const { 1093 // We need to emit GOTPLT even if it's empty if there's a relocation relative 1094 // to it. 1095 return !entries.empty() || hasGotPltOffRel; 1096 } 1097 1098 static StringRef getIgotPltName() { 1099 // On ARM the IgotPltSection is part of the GotSection. 1100 if (config->emachine == EM_ARM) 1101 return ".got"; 1102 1103 // On PowerPC64 the GotPltSection is renamed to '.plt' so the IgotPltSection 1104 // needs to be named the same. 1105 if (config->emachine == EM_PPC64) 1106 return ".plt"; 1107 1108 return ".got.plt"; 1109 } 1110 1111 // On PowerPC64 the GotPltSection type is SHT_NOBITS so we have to follow suit 1112 // with the IgotPltSection. 1113 IgotPltSection::IgotPltSection() 1114 : SyntheticSection(SHF_ALLOC | SHF_WRITE, 1115 config->emachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS, 1116 config->wordsize, getIgotPltName()) {} 1117 1118 void IgotPltSection::addEntry(Symbol &sym) { 1119 assert(sym.pltIndex == entries.size()); 1120 entries.push_back(&sym); 1121 } 1122 1123 size_t IgotPltSection::getSize() const { 1124 return entries.size() * config->wordsize; 1125 } 1126 1127 void IgotPltSection::writeTo(uint8_t *buf) { 1128 for (const Symbol *b : entries) { 1129 target->writeIgotPlt(buf, *b); 1130 buf += config->wordsize; 1131 } 1132 } 1133 1134 StringTableSection::StringTableSection(StringRef name, bool dynamic) 1135 : SyntheticSection(dynamic ? 
(uint64_t)SHF_ALLOC : 0, SHT_STRTAB, 1, name), 1136 dynamic(dynamic) { 1137 // ELF string tables start with a NUL byte. 1138 addString(""); 1139 } 1140 1141 // Adds a string to the string table. If `hashIt` is true we hash and check for 1142 // duplicates. It is optional because the name of global symbols are already 1143 // uniqued and hashing them again has a big cost for a small value: uniquing 1144 // them with some other string that happens to be the same. 1145 unsigned StringTableSection::addString(StringRef s, bool hashIt) { 1146 if (hashIt) { 1147 auto r = stringMap.insert(std::make_pair(s, this->size)); 1148 if (!r.second) 1149 return r.first->second; 1150 } 1151 unsigned ret = this->size; 1152 this->size = this->size + s.size() + 1; 1153 strings.push_back(s); 1154 return ret; 1155 } 1156 1157 void StringTableSection::writeTo(uint8_t *buf) { 1158 for (StringRef s : strings) { 1159 memcpy(buf, s.data(), s.size()); 1160 buf[s.size()] = '\0'; 1161 buf += s.size() + 1; 1162 } 1163 } 1164 1165 // Returns the number of version definition entries. Because the first entry 1166 // is for the version definition itself, it is the number of versioned symbols 1167 // plus one. Note that we don't support multiple versions yet. 1168 static unsigned getVerDefNum() { return config->versionDefinitions.size() + 1; } 1169 1170 template <class ELFT> 1171 DynamicSection<ELFT>::DynamicSection() 1172 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_DYNAMIC, config->wordsize, 1173 ".dynamic") { 1174 this->entsize = ELFT::Is64Bits ? 16 : 8; 1175 1176 // .dynamic section is not writable on MIPS and on Fuchsia OS 1177 // which passes -z rodynamic. 1178 // See "Special Section" in Chapter 4 in the following document: 1179 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf 1180 if (config->emachine == EM_MIPS || config->zRodynamic) 1181 this->flags = SHF_ALLOC; 1182 } 1183 1184 template <class ELFT> 1185 void DynamicSection<ELFT>::add(int32_t tag, std::function<uint64_t()> fn) { 1186 entries.push_back({tag, fn}); 1187 } 1188 1189 template <class ELFT> 1190 void DynamicSection<ELFT>::addInt(int32_t tag, uint64_t val) { 1191 entries.push_back({tag, [=] { return val; }}); 1192 } 1193 1194 template <class ELFT> 1195 void DynamicSection<ELFT>::addInSec(int32_t tag, InputSection *sec) { 1196 entries.push_back({tag, [=] { return sec->getVA(0); }}); 1197 } 1198 1199 template <class ELFT> 1200 void DynamicSection<ELFT>::addInSecRelative(int32_t tag, InputSection *sec) { 1201 size_t tagOffset = entries.size() * entsize; 1202 entries.push_back( 1203 {tag, [=] { return sec->getVA(0) - (getVA() + tagOffset); }}); 1204 } 1205 1206 template <class ELFT> 1207 void DynamicSection<ELFT>::addOutSec(int32_t tag, OutputSection *sec) { 1208 entries.push_back({tag, [=] { return sec->addr; }}); 1209 } 1210 1211 template <class ELFT> 1212 void DynamicSection<ELFT>::addSize(int32_t tag, OutputSection *sec) { 1213 entries.push_back({tag, [=] { return sec->size; }}); 1214 } 1215 1216 template <class ELFT> 1217 void DynamicSection<ELFT>::addSym(int32_t tag, Symbol *sym) { 1218 entries.push_back({tag, [=] { return sym->getVA(); }}); 1219 } 1220 1221 // A Linker script may assign the RELA relocation sections to the same 1222 // output section. When this occurs we cannot just use the OutputSection 1223 // Size. Moreover the [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted to 1224 // overlap with the [DT_RELA, DT_RELA + DT_RELASZ). 
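// For that reason addPltRelSz() computes DT_PLTRELSZ from the synthetic input
// sections themselves, adding in .rel[a].iplt when it has been placed into the
// same output section (under the same name) as .rel[a].plt.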
1225 static uint64_t addPltRelSz() { 1226 size_t size = in.relaPlt->getSize(); 1227 if (in.relaIplt->getParent() == in.relaPlt->getParent() && 1228 in.relaIplt->name == in.relaPlt->name) 1229 size += in.relaIplt->getSize(); 1230 return size; 1231 } 1232 1233 // Add remaining entries to complete .dynamic contents. 1234 template <class ELFT> void DynamicSection<ELFT>::finalizeContents() { 1235 elf::Partition &part = getPartition(); 1236 bool isMain = part.name.empty(); 1237 1238 for (StringRef s : config->filterList) 1239 addInt(DT_FILTER, part.dynStrTab->addString(s)); 1240 for (StringRef s : config->auxiliaryList) 1241 addInt(DT_AUXILIARY, part.dynStrTab->addString(s)); 1242 1243 if (!config->rpath.empty()) 1244 addInt(config->enableNewDtags ? DT_RUNPATH : DT_RPATH, 1245 part.dynStrTab->addString(config->rpath)); 1246 1247 for (SharedFile *file : sharedFiles) 1248 if (file->isNeeded) 1249 addInt(DT_NEEDED, part.dynStrTab->addString(file->soName)); 1250 1251 if (isMain) { 1252 if (!config->soName.empty()) 1253 addInt(DT_SONAME, part.dynStrTab->addString(config->soName)); 1254 } else { 1255 if (!config->soName.empty()) 1256 addInt(DT_NEEDED, part.dynStrTab->addString(config->soName)); 1257 addInt(DT_SONAME, part.dynStrTab->addString(part.name)); 1258 } 1259 1260 // Set DT_FLAGS and DT_FLAGS_1. 1261 uint32_t dtFlags = 0; 1262 uint32_t dtFlags1 = 0; 1263 if (config->bsymbolic) 1264 dtFlags |= DF_SYMBOLIC; 1265 if (config->zGlobal) 1266 dtFlags1 |= DF_1_GLOBAL; 1267 if (config->zInitfirst) 1268 dtFlags1 |= DF_1_INITFIRST; 1269 if (config->zInterpose) 1270 dtFlags1 |= DF_1_INTERPOSE; 1271 if (config->zNodefaultlib) 1272 dtFlags1 |= DF_1_NODEFLIB; 1273 if (config->zNodelete) 1274 dtFlags1 |= DF_1_NODELETE; 1275 if (config->zNodlopen) 1276 dtFlags1 |= DF_1_NOOPEN; 1277 if (config->zNow) { 1278 dtFlags |= DF_BIND_NOW; 1279 dtFlags1 |= DF_1_NOW; 1280 } 1281 if (config->zOrigin) { 1282 dtFlags |= DF_ORIGIN; 1283 dtFlags1 |= DF_1_ORIGIN; 1284 } 1285 if (!config->zText) 1286 dtFlags |= DF_TEXTREL; 1287 if (config->hasStaticTlsModel) 1288 dtFlags |= DF_STATIC_TLS; 1289 1290 if (dtFlags) 1291 addInt(DT_FLAGS, dtFlags); 1292 if (dtFlags1) 1293 addInt(DT_FLAGS_1, dtFlags1); 1294 1295 // DT_DEBUG is a pointer to debug informaion used by debuggers at runtime. We 1296 // need it for each process, so we don't write it for DSOs. The loader writes 1297 // the pointer into this entry. 1298 // 1299 // DT_DEBUG is the only .dynamic entry that needs to be written to. Some 1300 // systems (currently only Fuchsia OS) provide other means to give the 1301 // debugger this information. Such systems may choose make .dynamic read-only. 1302 // If the target is such a system (used -z rodynamic) don't write DT_DEBUG. 1303 if (!config->shared && !config->relocatable && !config->zRodynamic) 1304 addInt(DT_DEBUG, 0); 1305 1306 if (OutputSection *sec = part.dynStrTab->getParent()) 1307 this->link = sec->sectionIndex; 1308 1309 if (part.relaDyn->isNeeded()) { 1310 addInSec(part.relaDyn->dynamicTag, part.relaDyn); 1311 addSize(part.relaDyn->sizeDynamicTag, part.relaDyn->getParent()); 1312 1313 bool isRela = config->isRela; 1314 addInt(isRela ? DT_RELAENT : DT_RELENT, 1315 isRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel)); 1316 1317 // MIPS dynamic loader does not support RELCOUNT tag. 1318 // The problem is in the tight relation between dynamic 1319 // relocations and GOT. So do not emit this tag on MIPS. 
1320 if (config->emachine != EM_MIPS) { 1321 size_t numRelativeRels = part.relaDyn->getRelativeRelocCount(); 1322 if (config->zCombreloc && numRelativeRels) 1323 addInt(isRela ? DT_RELACOUNT : DT_RELCOUNT, numRelativeRels); 1324 } 1325 } 1326 if (part.relrDyn && !part.relrDyn->relocs.empty()) { 1327 addInSec(config->useAndroidRelrTags ? DT_ANDROID_RELR : DT_RELR, 1328 part.relrDyn); 1329 addSize(config->useAndroidRelrTags ? DT_ANDROID_RELRSZ : DT_RELRSZ, 1330 part.relrDyn->getParent()); 1331 addInt(config->useAndroidRelrTags ? DT_ANDROID_RELRENT : DT_RELRENT, 1332 sizeof(Elf_Relr)); 1333 } 1334 // .rel[a].plt section usually consists of two parts, containing plt and 1335 // iplt relocations. It is possible to have only iplt relocations in the 1336 // output. In that case relaPlt is empty and have zero offset, the same offset 1337 // as relaIplt has. And we still want to emit proper dynamic tags for that 1338 // case, so here we always use relaPlt as marker for the begining of 1339 // .rel[a].plt section. 1340 if (isMain && (in.relaPlt->isNeeded() || in.relaIplt->isNeeded())) { 1341 addInSec(DT_JMPREL, in.relaPlt); 1342 entries.push_back({DT_PLTRELSZ, addPltRelSz}); 1343 switch (config->emachine) { 1344 case EM_MIPS: 1345 addInSec(DT_MIPS_PLTGOT, in.gotPlt); 1346 break; 1347 case EM_SPARCV9: 1348 addInSec(DT_PLTGOT, in.plt); 1349 break; 1350 default: 1351 addInSec(DT_PLTGOT, in.gotPlt); 1352 break; 1353 } 1354 addInt(DT_PLTREL, config->isRela ? DT_RELA : DT_REL); 1355 } 1356 1357 if (config->emachine == EM_AARCH64) { 1358 if (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 1359 addInt(DT_AARCH64_BTI_PLT, 0); 1360 if (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_PAC) 1361 addInt(DT_AARCH64_PAC_PLT, 0); 1362 } 1363 1364 addInSec(DT_SYMTAB, part.dynSymTab); 1365 addInt(DT_SYMENT, sizeof(Elf_Sym)); 1366 addInSec(DT_STRTAB, part.dynStrTab); 1367 addInt(DT_STRSZ, part.dynStrTab->getSize()); 1368 if (!config->zText) 1369 addInt(DT_TEXTREL, 0); 1370 if (part.gnuHashTab) 1371 addInSec(DT_GNU_HASH, part.gnuHashTab); 1372 if (part.hashTab) 1373 addInSec(DT_HASH, part.hashTab); 1374 1375 if (isMain) { 1376 if (Out::preinitArray) { 1377 addOutSec(DT_PREINIT_ARRAY, Out::preinitArray); 1378 addSize(DT_PREINIT_ARRAYSZ, Out::preinitArray); 1379 } 1380 if (Out::initArray) { 1381 addOutSec(DT_INIT_ARRAY, Out::initArray); 1382 addSize(DT_INIT_ARRAYSZ, Out::initArray); 1383 } 1384 if (Out::finiArray) { 1385 addOutSec(DT_FINI_ARRAY, Out::finiArray); 1386 addSize(DT_FINI_ARRAYSZ, Out::finiArray); 1387 } 1388 1389 if (Symbol *b = symtab->find(config->init)) 1390 if (b->isDefined()) 1391 addSym(DT_INIT, b); 1392 if (Symbol *b = symtab->find(config->fini)) 1393 if (b->isDefined()) 1394 addSym(DT_FINI, b); 1395 } 1396 1397 bool hasVerNeed = SharedFile::vernauxNum != 0; 1398 if (hasVerNeed || part.verDef) 1399 addInSec(DT_VERSYM, part.verSym); 1400 if (part.verDef) { 1401 addInSec(DT_VERDEF, part.verDef); 1402 addInt(DT_VERDEFNUM, getVerDefNum()); 1403 } 1404 if (hasVerNeed) { 1405 addInSec(DT_VERNEED, part.verNeed); 1406 unsigned needNum = 0; 1407 for (SharedFile *f : sharedFiles) 1408 if (!f->vernauxs.empty()) 1409 ++needNum; 1410 addInt(DT_VERNEEDNUM, needNum); 1411 } 1412 1413 if (config->emachine == EM_MIPS) { 1414 addInt(DT_MIPS_RLD_VERSION, 1); 1415 addInt(DT_MIPS_FLAGS, RHF_NOTPOT); 1416 addInt(DT_MIPS_BASE_ADDRESS, target->getImageBase()); 1417 addInt(DT_MIPS_SYMTABNO, part.dynSymTab->getNumSymbols()); 1418 1419 add(DT_MIPS_LOCAL_GOTNO, [] { return in.mipsGot->getLocalEntriesNum(); }); 1420 
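    // DT_MIPS_GOTSYM holds the dynamic symbol table index of the first symbol
    // that has an entry in the global part of the GOT; if no such symbol
    // exists it is set to the total number of dynamic symbols, meaning "none".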
1421 if (const Symbol *b = in.mipsGot->getFirstGlobalEntry()) 1422 addInt(DT_MIPS_GOTSYM, b->dynsymIndex); 1423 else 1424 addInt(DT_MIPS_GOTSYM, part.dynSymTab->getNumSymbols()); 1425 addInSec(DT_PLTGOT, in.mipsGot); 1426 if (in.mipsRldMap) { 1427 if (!config->pie) 1428 addInSec(DT_MIPS_RLD_MAP, in.mipsRldMap); 1429 // Store the offset to the .rld_map section 1430 // relative to the address of the tag. 1431 addInSecRelative(DT_MIPS_RLD_MAP_REL, in.mipsRldMap); 1432 } 1433 } 1434 1435 // DT_PPC_GOT indicates to glibc Secure PLT is used. If DT_PPC_GOT is absent, 1436 // glibc assumes the old-style BSS PLT layout which we don't support. 1437 if (config->emachine == EM_PPC) 1438 add(DT_PPC_GOT, [] { return in.got->getVA(); }); 1439 1440 // Glink dynamic tag is required by the V2 abi if the plt section isn't empty. 1441 if (config->emachine == EM_PPC64 && in.plt->isNeeded()) { 1442 // The Glink tag points to 32 bytes before the first lazy symbol resolution 1443 // stub, which starts directly after the header. 1444 entries.push_back({DT_PPC64_GLINK, [=] { 1445 unsigned offset = target->pltHeaderSize - 32; 1446 return in.plt->getVA(0) + offset; 1447 }}); 1448 } 1449 1450 addInt(DT_NULL, 0); 1451 1452 getParent()->link = this->link; 1453 this->size = entries.size() * this->entsize; 1454 } 1455 1456 template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *buf) { 1457 auto *p = reinterpret_cast<Elf_Dyn *>(buf); 1458 1459 for (std::pair<int32_t, std::function<uint64_t()>> &kv : entries) { 1460 p->d_tag = kv.first; 1461 p->d_un.d_val = kv.second(); 1462 ++p; 1463 } 1464 } 1465 1466 uint64_t DynamicReloc::getOffset() const { 1467 return inputSec->getVA(offsetInSec); 1468 } 1469 1470 int64_t DynamicReloc::computeAddend() const { 1471 if (useSymVA) 1472 return sym->getVA(addend); 1473 if (!outputSec) 1474 return addend; 1475 // See the comment in the DynamicReloc ctor. 1476 return getMipsPageAddr(outputSec->addr) + addend; 1477 } 1478 1479 uint32_t DynamicReloc::getSymIndex(SymbolTableBaseSection *symTab) const { 1480 if (sym && !useSymVA) 1481 return symTab->getSymbolIndex(sym); 1482 return 0; 1483 } 1484 1485 RelocationBaseSection::RelocationBaseSection(StringRef name, uint32_t type, 1486 int32_t dynamicTag, 1487 int32_t sizeDynamicTag) 1488 : SyntheticSection(SHF_ALLOC, type, config->wordsize, name), 1489 dynamicTag(dynamicTag), sizeDynamicTag(sizeDynamicTag) {} 1490 1491 void RelocationBaseSection::addReloc(RelType dynType, InputSectionBase *isec, 1492 uint64_t offsetInSec, Symbol *sym) { 1493 addReloc({dynType, isec, offsetInSec, false, sym, 0}); 1494 } 1495 1496 void RelocationBaseSection::addReloc(RelType dynType, 1497 InputSectionBase *inputSec, 1498 uint64_t offsetInSec, Symbol *sym, 1499 int64_t addend, RelExpr expr, 1500 RelType type) { 1501 // Write the addends to the relocated address if required. We skip 1502 // it if the written value would be zero. 
1503 if (config->writeAddends && (expr != R_ADDEND || addend != 0)) 1504 inputSec->relocations.push_back({expr, type, offsetInSec, addend, sym}); 1505 addReloc({dynType, inputSec, offsetInSec, expr != R_ADDEND, sym, addend}); 1506 } 1507 1508 void RelocationBaseSection::addReloc(const DynamicReloc &reloc) { 1509 if (reloc.type == target->relativeRel) 1510 ++numRelativeRelocs; 1511 relocs.push_back(reloc); 1512 } 1513 1514 void RelocationBaseSection::finalizeContents() { 1515 SymbolTableBaseSection *symTab = getPartition().dynSymTab; 1516 1517 // When linking glibc statically, .rel{,a}.plt contains R_*_IRELATIVE 1518 // relocations due to IFUNC (e.g. strcpy). sh_link will be set to 0 in that 1519 // case. 1520 if (symTab && symTab->getParent()) 1521 getParent()->link = symTab->getParent()->sectionIndex; 1522 else 1523 getParent()->link = 0; 1524 1525 if (in.relaPlt == this) 1526 getParent()->info = in.gotPlt->getParent()->sectionIndex; 1527 if (in.relaIplt == this) 1528 getParent()->info = in.igotPlt->getParent()->sectionIndex; 1529 } 1530 1531 RelrBaseSection::RelrBaseSection() 1532 : SyntheticSection(SHF_ALLOC, 1533 config->useAndroidRelrTags ? SHT_ANDROID_RELR : SHT_RELR, 1534 config->wordsize, ".relr.dyn") {} 1535 1536 template <class ELFT> 1537 static void encodeDynamicReloc(SymbolTableBaseSection *symTab, 1538 typename ELFT::Rela *p, 1539 const DynamicReloc &rel) { 1540 if (config->isRela) 1541 p->r_addend = rel.computeAddend(); 1542 p->r_offset = rel.getOffset(); 1543 p->setSymbolAndType(rel.getSymIndex(symTab), rel.type, config->isMips64EL); 1544 } 1545 1546 template <class ELFT> 1547 RelocationSection<ELFT>::RelocationSection(StringRef name, bool sort) 1548 : RelocationBaseSection(name, config->isRela ? SHT_RELA : SHT_REL, 1549 config->isRela ? DT_RELA : DT_REL, 1550 config->isRela ? DT_RELASZ : DT_RELSZ), 1551 sort(sort) { 1552 this->entsize = config->isRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel); 1553 } 1554 1555 template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *buf) { 1556 SymbolTableBaseSection *symTab = getPartition().dynSymTab; 1557 1558 // Sort by (!IsRelative,SymIndex,r_offset). DT_REL[A]COUNT requires us to 1559 // place R_*_RELATIVE first. SymIndex is to improve locality, while r_offset 1560 // is to make results easier to read. 1561 if (sort) 1562 llvm::stable_sort( 1563 relocs, [&](const DynamicReloc &a, const DynamicReloc &b) { 1564 return std::make_tuple(a.type != target->relativeRel, 1565 a.getSymIndex(symTab), a.getOffset()) < 1566 std::make_tuple(b.type != target->relativeRel, 1567 b.getSymIndex(symTab), b.getOffset()); 1568 }); 1569 1570 for (const DynamicReloc &rel : relocs) { 1571 encodeDynamicReloc<ELFT>(symTab, reinterpret_cast<Elf_Rela *>(buf), rel); 1572 buf += config->isRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel); 1573 } 1574 } 1575 1576 template <class ELFT> 1577 AndroidPackedRelocationSection<ELFT>::AndroidPackedRelocationSection( 1578 StringRef name) 1579 : RelocationBaseSection( 1580 name, config->isRela ? SHT_ANDROID_RELA : SHT_ANDROID_REL, 1581 config->isRela ? DT_ANDROID_RELA : DT_ANDROID_REL, 1582 config->isRela ? DT_ANDROID_RELASZ : DT_ANDROID_RELSZ) { 1583 this->entsize = 1; 1584 } 1585 1586 template <class ELFT> 1587 bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() { 1588 // This function computes the contents of an Android-format packed relocation 1589 // section. 
1590 // 1591 // This format compresses relocations by using relocation groups to factor out 1592 // fields that are common between relocations and storing deltas from previous 1593 // relocations in SLEB128 format (which has a short representation for small 1594 // numbers). A good example of a relocation type with common fields is 1595 // R_*_RELATIVE, which is normally used to represent function pointers in 1596 // vtables. In the REL format, each relative relocation has the same r_info 1597 // field, and is only different from other relative relocations in terms of 1598 // the r_offset field. By sorting relocations by offset, grouping them by 1599 // r_info and representing each relocation with only the delta from the 1600 // previous offset, each 8-byte relocation can be compressed to as little as 1 1601 // byte (or less with run-length encoding). This relocation packer was able to 1602 // reduce the size of the relocation section in an Android Chromium DSO from 1603 // 2,911,184 bytes to 174,693 bytes, or 6% of the original size. 1604 // 1605 // A relocation section consists of a header containing the literal bytes 1606 // 'APS2' followed by a sequence of SLEB128-encoded integers. The first two 1607 // elements are the total number of relocations in the section and an initial 1608 // r_offset value. The remaining elements define a sequence of relocation 1609 // groups. Each relocation group starts with a header consisting of the 1610 // following elements: 1611 // 1612 // - the number of relocations in the relocation group 1613 // - flags for the relocation group 1614 // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is set) the r_offset delta 1615 // for each relocation in the group. 1616 // - (if RELOCATION_GROUPED_BY_INFO_FLAG is set) the value of the r_info 1617 // field for each relocation in the group. 1618 // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG and 1619 // RELOCATION_GROUPED_BY_ADDEND_FLAG are set) the r_addend delta for 1620 // each relocation in the group. 1621 // 1622 // Following the relocation group header are descriptions of each of the 1623 // relocations in the group. They consist of the following elements: 1624 // 1625 // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is not set) the r_offset 1626 // delta for this relocation. 1627 // - (if RELOCATION_GROUPED_BY_INFO_FLAG is not set) the value of the r_info 1628 // field for this relocation. 1629 // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG is set and 1630 // RELOCATION_GROUPED_BY_ADDEND_FLAG is not set) the r_addend delta for 1631 // this relocation. 1632 1633 size_t oldSize = relocData.size(); 1634 1635 relocData = {'A', 'P', 'S', '2'}; 1636 raw_svector_ostream os(relocData); 1637 auto add = [&](int64_t v) { encodeSLEB128(v, os); }; 1638 1639 // The format header includes the number of relocations and the initial 1640 // offset (we set this to zero because the first relocation group will 1641 // perform the initial adjustment). 
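  // As a concrete (hypothetical) illustration: with 1024 relocations, the
  // header written below would be the literal bytes 'A' 'P' 'S' '2' followed
  // by sleb128(1024) = {0x80, 0x08} and sleb128(0) = {0x00}.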
1642 add(relocs.size()); 1643 add(0); 1644 1645 std::vector<Elf_Rela> relatives, nonRelatives; 1646 1647 for (const DynamicReloc &rel : relocs) { 1648 Elf_Rela r; 1649 encodeDynamicReloc<ELFT>(getPartition().dynSymTab, &r, rel); 1650 1651 if (r.getType(config->isMips64EL) == target->relativeRel) 1652 relatives.push_back(r); 1653 else 1654 nonRelatives.push_back(r); 1655 } 1656 1657 llvm::sort(relatives, [](const Elf_Rel &a, const Elf_Rel &b) { 1658 return a.r_offset < b.r_offset; 1659 }); 1660 1661 // Try to find groups of relative relocations which are spaced one word 1662 // apart from one another. These generally correspond to vtable entries. The 1663 // format allows these groups to be encoded using a sort of run-length 1664 // encoding, but each group will cost 7 bytes in addition to the offset from 1665 // the previous group, so it is only profitable to do this for groups of 1666 // size 8 or larger. 1667 std::vector<Elf_Rela> ungroupedRelatives; 1668 std::vector<std::vector<Elf_Rela>> relativeGroups; 1669 for (auto i = relatives.begin(), e = relatives.end(); i != e;) { 1670 std::vector<Elf_Rela> group; 1671 do { 1672 group.push_back(*i++); 1673 } while (i != e && (i - 1)->r_offset + config->wordsize == i->r_offset); 1674 1675 if (group.size() < 8) 1676 ungroupedRelatives.insert(ungroupedRelatives.end(), group.begin(), 1677 group.end()); 1678 else 1679 relativeGroups.emplace_back(std::move(group)); 1680 } 1681 1682 unsigned hasAddendIfRela = 1683 config->isRela ? RELOCATION_GROUP_HAS_ADDEND_FLAG : 0; 1684 1685 uint64_t offset = 0; 1686 uint64_t addend = 0; 1687 1688 // Emit the run-length encoding for the groups of adjacent relative 1689 // relocations. Each group is represented using two groups in the packed 1690 // format. The first is used to set the current offset to the start of the 1691 // group (and also encodes the first relocation), and the second encodes the 1692 // remaining relocations. 1693 for (std::vector<Elf_Rela> &g : relativeGroups) { 1694 // The first relocation in the group. 1695 add(1); 1696 add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG | 1697 RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela); 1698 add(g[0].r_offset - offset); 1699 add(target->relativeRel); 1700 if (config->isRela) { 1701 add(g[0].r_addend - addend); 1702 addend = g[0].r_addend; 1703 } 1704 1705 // The remaining relocations. 1706 add(g.size() - 1); 1707 add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG | 1708 RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela); 1709 add(config->wordsize); 1710 add(target->relativeRel); 1711 if (config->isRela) { 1712 for (auto i = g.begin() + 1, e = g.end(); i != e; ++i) { 1713 add(i->r_addend - addend); 1714 addend = i->r_addend; 1715 } 1716 } 1717 1718 offset = g.back().r_offset; 1719 } 1720 1721 // Now the ungrouped relatives. 1722 if (!ungroupedRelatives.empty()) { 1723 add(ungroupedRelatives.size()); 1724 add(RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela); 1725 add(target->relativeRel); 1726 for (Elf_Rela &r : ungroupedRelatives) { 1727 add(r.r_offset - offset); 1728 offset = r.r_offset; 1729 if (config->isRela) { 1730 add(r.r_addend - addend); 1731 addend = r.r_addend; 1732 } 1733 } 1734 } 1735 1736 // Finally the non-relative relocations. 
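  // Non-relative relocations form a single trailing group. Their r_info
  // values generally differ, so RELOCATION_GROUPED_BY_INFO_FLAG is not set
  // and r_info is emitted per relocation below (plus an addend delta for
  // RELA targets).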
1737 llvm::sort(nonRelatives, [](const Elf_Rela &a, const Elf_Rela &b) { 1738 return a.r_offset < b.r_offset; 1739 }); 1740 if (!nonRelatives.empty()) { 1741 add(nonRelatives.size()); 1742 add(hasAddendIfRela); 1743 for (Elf_Rela &r : nonRelatives) { 1744 add(r.r_offset - offset); 1745 offset = r.r_offset; 1746 add(r.r_info); 1747 if (config->isRela) { 1748 add(r.r_addend - addend); 1749 addend = r.r_addend; 1750 } 1751 } 1752 } 1753 1754 // Don't allow the section to shrink; otherwise the size of the section can 1755 // oscillate infinitely. 1756 if (relocData.size() < oldSize) 1757 relocData.append(oldSize - relocData.size(), 0); 1758 1759 // Returns whether the section size changed. We need to keep recomputing both 1760 // section layout and the contents of this section until the size converges 1761 // because changing this section's size can affect section layout, which in 1762 // turn can affect the sizes of the LEB-encoded integers stored in this 1763 // section. 1764 return relocData.size() != oldSize; 1765 } 1766 1767 template <class ELFT> RelrSection<ELFT>::RelrSection() { 1768 this->entsize = config->wordsize; 1769 } 1770 1771 template <class ELFT> bool RelrSection<ELFT>::updateAllocSize() { 1772 // This function computes the contents of an SHT_RELR packed relocation 1773 // section. 1774 // 1775 // Proposal for adding SHT_RELR sections to generic-abi is here: 1776 // https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg 1777 // 1778 // The encoded sequence of Elf64_Relr entries in a SHT_RELR section looks 1779 // like [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ] 1780 // 1781 // i.e. start with an address, followed by any number of bitmaps. The address 1782 // entry encodes 1 relocation. The subsequent bitmap entries encode up to 63 1783 // relocations each, at subsequent offsets following the last address entry. 1784 // 1785 // The bitmap entries must have 1 in the least significant bit. The assumption 1786 // here is that an address cannot have 1 in lsb. Odd addresses are not 1787 // supported. 1788 // 1789 // Excluding the least significant bit in the bitmap, each non-zero bit in 1790 // the bitmap represents a relocation to be applied to a corresponding machine 1791 // word that follows the base address word. The second least significant bit 1792 // represents the machine word immediately following the initial address, and 1793 // each bit that follows represents the next word, in linear order. As such, 1794 // a single bitmap can encode up to 31 relocations in a 32-bit object, and 1795 // 63 relocations in a 64-bit object. 1796 // 1797 // This encoding has a couple of interesting properties: 1798 // 1. Looking at any entry, it is clear whether it's an address or a bitmap: 1799 // even means address, odd means bitmap. 1800 // 2. Just a simple list of addresses is a valid encoding. 1801 1802 size_t oldSize = relrRelocs.size(); 1803 relrRelocs.clear(); 1804 1805 // Same as Config->Wordsize but faster because this is a compile-time 1806 // constant. 1807 const size_t wordsize = sizeof(typename ELFT::uint); 1808 1809 // Number of bits to use for the relocation offsets bitmap. 1810 // Must be either 63 or 31. 1811 const size_t nBits = wordsize * 8 - 1; 1812 1813 // Get offsets for all relative relocations and sort them. 
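  // As a concrete (hypothetical) example on a 64-bit target: relative
  // relocations at 0x10000, 0x10008, 0x10010 and 0x10020 are folded by the
  // loop below into just two words: the even address entry 0x10000, then the
  // odd bitmap entry 0x17, whose bits 1, 2 and 4 cover the words at 0x10008,
  // 0x10010 and 0x10020.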
1814 std::vector<uint64_t> offsets; 1815 for (const RelativeReloc &rel : relocs) 1816 offsets.push_back(rel.getOffset()); 1817 llvm::sort(offsets); 1818 1819 // For each leading relocation, find following ones that can be folded 1820 // as a bitmap and fold them. 1821 for (size_t i = 0, e = offsets.size(); i < e;) { 1822 // Add a leading relocation. 1823 relrRelocs.push_back(Elf_Relr(offsets[i])); 1824 uint64_t base = offsets[i] + wordsize; 1825 ++i; 1826 1827 // Find foldable relocations to construct bitmaps. 1828 while (i < e) { 1829 uint64_t bitmap = 0; 1830 1831 while (i < e) { 1832 uint64_t delta = offsets[i] - base; 1833 1834 // If it is too far, it cannot be folded. 1835 if (delta >= nBits * wordsize) 1836 break; 1837 1838 // If it is not a multiple of wordsize away, it cannot be folded. 1839 if (delta % wordsize) 1840 break; 1841 1842 // Fold it. 1843 bitmap |= 1ULL << (delta / wordsize); 1844 ++i; 1845 } 1846 1847 if (!bitmap) 1848 break; 1849 1850 relrRelocs.push_back(Elf_Relr((bitmap << 1) | 1)); 1851 base += nBits * wordsize; 1852 } 1853 } 1854 1855 return relrRelocs.size() != oldSize; 1856 } 1857 1858 SymbolTableBaseSection::SymbolTableBaseSection(StringTableSection &strTabSec) 1859 : SyntheticSection(strTabSec.isDynamic() ? (uint64_t)SHF_ALLOC : 0, 1860 strTabSec.isDynamic() ? SHT_DYNSYM : SHT_SYMTAB, 1861 config->wordsize, 1862 strTabSec.isDynamic() ? ".dynsym" : ".symtab"), 1863 strTabSec(strTabSec) {} 1864 1865 // Orders symbols according to their positions in the GOT, 1866 // in compliance with MIPS ABI rules. 1867 // See "Global Offset Table" in Chapter 5 in the following document 1868 // for detailed description: 1869 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf 1870 static bool sortMipsSymbols(const SymbolTableEntry &l, 1871 const SymbolTableEntry &r) { 1872 // Sort entries related to non-local preemptible symbols by GOT indexes. 1873 // All other entries go to the beginning of a dynsym in arbitrary order. 1874 if (l.sym->isInGot() && r.sym->isInGot()) 1875 return l.sym->gotIndex < r.sym->gotIndex; 1876 if (!l.sym->isInGot() && !r.sym->isInGot()) 1877 return false; 1878 return !l.sym->isInGot(); 1879 } 1880 1881 void SymbolTableBaseSection::finalizeContents() { 1882 if (OutputSection *sec = strTabSec.getParent()) 1883 getParent()->link = sec->sectionIndex; 1884 1885 if (this->type != SHT_DYNSYM) { 1886 sortSymTabSymbols(); 1887 return; 1888 } 1889 1890 // If it is a .dynsym, there should be no local symbols, but we need 1891 // to do a few things for the dynamic linker. 1892 1893 // Section's Info field has the index of the first non-local symbol. 1894 // Because the first symbol entry is a null entry, 1 is the first. 1895 getParent()->info = 1; 1896 1897 if (getPartition().gnuHashTab) { 1898 // NB: It also sorts Symbols to meet the GNU hash table requirements. 1899 getPartition().gnuHashTab->addSymbols(symbols); 1900 } else if (config->emachine == EM_MIPS) { 1901 llvm::stable_sort(symbols, sortMipsSymbols); 1902 } 1903 1904 // Only the main partition's dynsym indexes are stored in the symbols 1905 // themselves. All other partitions use a lookup table. 1906 if (this == mainPart->dynSymTab) { 1907 size_t i = 0; 1908 for (const SymbolTableEntry &s : symbols) 1909 s.sym->dynsymIndex = ++i; 1910 } 1911 } 1912 1913 // The ELF spec requires that all local symbols precede global symbols, so we 1914 // sort symbol entries in this function. (For .dynsym, we don't do that because 1915 // symbols for dynamic linking are inherently all globals.) 
1916 // 1917 // Aside from above, we put local symbols in groups starting with the STT_FILE 1918 // symbol. That is convenient for purpose of identifying where are local symbols 1919 // coming from. 1920 void SymbolTableBaseSection::sortSymTabSymbols() { 1921 // Move all local symbols before global symbols. 1922 auto e = std::stable_partition( 1923 symbols.begin(), symbols.end(), [](const SymbolTableEntry &s) { 1924 return s.sym->isLocal() || s.sym->computeBinding() == STB_LOCAL; 1925 }); 1926 size_t numLocals = e - symbols.begin(); 1927 getParent()->info = numLocals + 1; 1928 1929 // We want to group the local symbols by file. For that we rebuild the local 1930 // part of the symbols vector. We do not need to care about the STT_FILE 1931 // symbols, they are already naturally placed first in each group. That 1932 // happens because STT_FILE is always the first symbol in the object and hence 1933 // precede all other local symbols we add for a file. 1934 MapVector<InputFile *, std::vector<SymbolTableEntry>> arr; 1935 for (const SymbolTableEntry &s : llvm::make_range(symbols.begin(), e)) 1936 arr[s.sym->file].push_back(s); 1937 1938 auto i = symbols.begin(); 1939 for (std::pair<InputFile *, std::vector<SymbolTableEntry>> &p : arr) 1940 for (SymbolTableEntry &entry : p.second) 1941 *i++ = entry; 1942 } 1943 1944 void SymbolTableBaseSection::addSymbol(Symbol *b) { 1945 // Adding a local symbol to a .dynsym is a bug. 1946 assert(this->type != SHT_DYNSYM || !b->isLocal()); 1947 1948 bool hashIt = b->isLocal(); 1949 symbols.push_back({b, strTabSec.addString(b->getName(), hashIt)}); 1950 } 1951 1952 size_t SymbolTableBaseSection::getSymbolIndex(Symbol *sym) { 1953 if (this == mainPart->dynSymTab) 1954 return sym->dynsymIndex; 1955 1956 // Initializes symbol lookup tables lazily. This is used only for -r, 1957 // -emit-relocs and dynsyms in partitions other than the main one. 1958 llvm::call_once(onceFlag, [&] { 1959 symbolIndexMap.reserve(symbols.size()); 1960 size_t i = 0; 1961 for (const SymbolTableEntry &e : symbols) { 1962 if (e.sym->type == STT_SECTION) 1963 sectionIndexMap[e.sym->getOutputSection()] = ++i; 1964 else 1965 symbolIndexMap[e.sym] = ++i; 1966 } 1967 }); 1968 1969 // Section symbols are mapped based on their output sections 1970 // to maintain their semantics. 1971 if (sym->type == STT_SECTION) 1972 return sectionIndexMap.lookup(sym->getOutputSection()); 1973 return symbolIndexMap.lookup(sym); 1974 } 1975 1976 template <class ELFT> 1977 SymbolTableSection<ELFT>::SymbolTableSection(StringTableSection &strTabSec) 1978 : SymbolTableBaseSection(strTabSec) { 1979 this->entsize = sizeof(Elf_Sym); 1980 } 1981 1982 static BssSection *getCommonSec(Symbol *sym) { 1983 if (!config->defineCommon) 1984 if (auto *d = dyn_cast<Defined>(sym)) 1985 return dyn_cast_or_null<BssSection>(d->section); 1986 return nullptr; 1987 } 1988 1989 static uint32_t getSymSectionIndex(Symbol *sym) { 1990 if (getCommonSec(sym)) 1991 return SHN_COMMON; 1992 if (!isa<Defined>(sym) || sym->needsPltAddr) 1993 return SHN_UNDEF; 1994 if (const OutputSection *os = sym->getOutputSection()) 1995 return os->sectionIndex >= SHN_LORESERVE ? (uint32_t)SHN_XINDEX 1996 : os->sectionIndex; 1997 return SHN_ABS; 1998 } 1999 2000 // Write the internal symbol table contents to the output symbol table. 2001 template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) { 2002 // The first entry is a null entry as per the ELF spec. 
2003 memset(buf, 0, sizeof(Elf_Sym)); 2004 buf += sizeof(Elf_Sym); 2005 2006 auto *eSym = reinterpret_cast<Elf_Sym *>(buf); 2007 2008 for (SymbolTableEntry &ent : symbols) { 2009 Symbol *sym = ent.sym; 2010 bool isDefinedHere = type == SHT_SYMTAB || sym->partition == partition; 2011 2012 // Set st_info and st_other. 2013 eSym->st_other = 0; 2014 if (sym->isLocal()) { 2015 eSym->setBindingAndType(STB_LOCAL, sym->type); 2016 } else { 2017 eSym->setBindingAndType(sym->computeBinding(), sym->type); 2018 eSym->setVisibility(sym->visibility); 2019 } 2020 2021 // The 3 most significant bits of st_other are used by OpenPOWER ABI. 2022 // See getPPC64GlobalEntryToLocalEntryOffset() for more details. 2023 if (config->emachine == EM_PPC64) 2024 eSym->st_other |= sym->stOther & 0xe0; 2025 2026 eSym->st_name = ent.strTabOffset; 2027 if (isDefinedHere) 2028 eSym->st_shndx = getSymSectionIndex(ent.sym); 2029 else 2030 eSym->st_shndx = 0; 2031 2032 // Copy symbol size if it is a defined symbol. st_size is not significant 2033 // for undefined symbols, so whether copying it or not is up to us if that's 2034 // the case. We'll leave it as zero because by not setting a value, we can 2035 // get the exact same outputs for two sets of input files that differ only 2036 // in undefined symbol size in DSOs. 2037 if (eSym->st_shndx == SHN_UNDEF || !isDefinedHere) 2038 eSym->st_size = 0; 2039 else 2040 eSym->st_size = sym->getSize(); 2041 2042 // st_value is usually an address of a symbol, but that has a 2043 // special meaining for uninstantiated common symbols (this can 2044 // occur if -r is given). 2045 if (BssSection *commonSec = getCommonSec(ent.sym)) 2046 eSym->st_value = commonSec->alignment; 2047 else if (isDefinedHere) 2048 eSym->st_value = sym->getVA(); 2049 else 2050 eSym->st_value = 0; 2051 2052 ++eSym; 2053 } 2054 2055 // On MIPS we need to mark symbol which has a PLT entry and requires 2056 // pointer equality by STO_MIPS_PLT flag. That is necessary to help 2057 // dynamic linker distinguish such symbols and MIPS lazy-binding stubs. 2058 // https://sourceware.org/ml/binutils/2008-07/txt00000.txt 2059 if (config->emachine == EM_MIPS) { 2060 auto *eSym = reinterpret_cast<Elf_Sym *>(buf); 2061 2062 for (SymbolTableEntry &ent : symbols) { 2063 Symbol *sym = ent.sym; 2064 if (sym->isInPlt() && sym->needsPltAddr) 2065 eSym->st_other |= STO_MIPS_PLT; 2066 if (isMicroMips()) { 2067 // We already set the less-significant bit for symbols 2068 // marked by the `STO_MIPS_MICROMIPS` flag and for microMIPS PLT 2069 // records. That allows us to distinguish such symbols in 2070 // the `MIPS<ELFT>::relocateOne()` routine. Now we should 2071 // clear that bit for non-dynamic symbol table, so tools 2072 // like `objdump` will be able to deal with a correct 2073 // symbol position. 2074 if (sym->isDefined() && 2075 ((sym->stOther & STO_MIPS_MICROMIPS) || sym->needsPltAddr)) { 2076 if (!strTabSec.isDynamic()) 2077 eSym->st_value &= ~1; 2078 eSym->st_other |= STO_MIPS_MICROMIPS; 2079 } 2080 } 2081 if (config->relocatable) 2082 if (auto *d = dyn_cast<Defined>(sym)) 2083 if (isMipsPIC<ELFT>(d)) 2084 eSym->st_other |= STO_MIPS_PIC; 2085 ++eSym; 2086 } 2087 } 2088 } 2089 2090 SymtabShndxSection::SymtabShndxSection() 2091 : SyntheticSection(0, SHT_SYMTAB_SHNDX, 4, ".symtab_shndx") { 2092 this->entsize = 4; 2093 } 2094 2095 void SymtabShndxSection::writeTo(uint8_t *buf) { 2096 // We write an array of 32 bit values, where each value has 1:1 association 2097 // with an entry in .symtab. 
  // If the corresponding entry contains SHN_XINDEX,
  // we need to write the actual index; otherwise, we must write SHN_UNDEF(0).
  buf += 4; // Ignore .symtab[0] entry.
  for (const SymbolTableEntry &entry : in.symTab->getSymbols()) {
    if (getSymSectionIndex(entry.sym) == SHN_XINDEX)
      write32(buf, entry.sym->getOutputSection()->sectionIndex);
    buf += 4;
  }
}

bool SymtabShndxSection::isNeeded() const {
  // SHT_SYMTAB can hold symbols with section index values up to
  // SHN_LORESERVE. If we need more, we want to use the extension
  // SHT_SYMTAB_SHNDX section. The problem is that we reveal the final section
  // indices a bit too late, and we do not know them here. For simplicity, we
  // just always create a .symtab_shndx section when the number of output
  // sections is huge.
  size_t size = 0;
  for (BaseCommand *base : script->sectionCommands)
    if (isa<OutputSection>(base))
      ++size;
  return size >= SHN_LORESERVE;
}

void SymtabShndxSection::finalizeContents() {
  getParent()->link = in.symTab->getParent()->sectionIndex;
}

size_t SymtabShndxSection::getSize() const {
  return in.symTab->getNumSymbols() * 4;
}

// .hash and .gnu.hash sections contain on-disk hash tables that map
// symbol names to their dynamic symbol table indices. Their purpose
// is to help the dynamic linker resolve symbols quickly. If ELF files
// don't have them, the dynamic linker has to do linear search on all
// dynamic symbols, which makes programs slower. Therefore, a .hash
// section is added to a DSO by default. A .gnu.hash is added if you
// give the -hash-style=gnu or -hash-style=both option.
//
// The Unix semantics of resolving dynamic symbols is somewhat expensive.
// Each ELF file has a list of DSOs that the ELF file depends on and a
// list of dynamic symbols that need to be resolved from any of the
// DSOs. That means resolving all dynamic symbols takes O(m)*O(n)
// where m is the number of DSOs and n is the number of dynamic
// symbols. For modern large programs, both m and n are large. So
// making each step faster by using hash tables substantially
// improves time to load programs.
//
// (Note that this is not the only way to design the shared library.
// For instance, the Windows DLL takes a different approach. On
// Windows, each dynamic symbol has the name of the DLL from which the
// symbol has to be resolved. That makes the cost of symbol resolution O(n).
// This disables some hacky techniques you can use on Unix such as
// LD_PRELOAD, but this is arguably better semantics than the Unix ones.)
//
// Due to historical reasons, we have two different hash tables, .hash
// and .gnu.hash. They are for the same purpose, and .gnu.hash is a new
// and better version of .hash. .hash is just an on-disk hash table, but
// .gnu.hash has a bloom filter in addition to a hash table to skip
// DSOs very quickly. If you are sure that your dynamic linker knows
// about .gnu.hash, you want to specify -hash-style=gnu. Otherwise, a
// safe bet is to specify -hash-style=both for backward compatibility.
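// For reference, the .gnu.hash section written by GnuHashTableSection below
// is laid out as follows (sketch; the bloom filter words are 64-bit on 64-bit
// targets and 32-bit otherwise):
//
//   uint32_t nBuckets;           // number of hash buckets
//   uint32_t symOffset;          // dynsym index of the first hashed symbol
//   uint32_t maskWords;          // number of bloom filter words
//   uint32_t shift2;             // second hash shift for the bloom filter
//   uintX_t  bloom[maskWords];   // bloom filter
//   uint32_t buckets[nBuckets];  // hash buckets
//   uint32_t values[];           // one hash value per hashed symbol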
2159 GnuHashTableSection::GnuHashTableSection() 2160 : SyntheticSection(SHF_ALLOC, SHT_GNU_HASH, config->wordsize, ".gnu.hash") { 2161 } 2162 2163 void GnuHashTableSection::finalizeContents() { 2164 if (OutputSection *sec = getPartition().dynSymTab->getParent()) 2165 getParent()->link = sec->sectionIndex; 2166 2167 // Computes bloom filter size in word size. We want to allocate 12 2168 // bits for each symbol. It must be a power of two. 2169 if (symbols.empty()) { 2170 maskWords = 1; 2171 } else { 2172 uint64_t numBits = symbols.size() * 12; 2173 maskWords = NextPowerOf2(numBits / (config->wordsize * 8)); 2174 } 2175 2176 size = 16; // Header 2177 size += config->wordsize * maskWords; // Bloom filter 2178 size += nBuckets * 4; // Hash buckets 2179 size += symbols.size() * 4; // Hash values 2180 } 2181 2182 void GnuHashTableSection::writeTo(uint8_t *buf) { 2183 // The output buffer is not guaranteed to be zero-cleared because we pre- 2184 // fill executable sections with trap instructions. This is a precaution 2185 // for that case, which happens only when -no-rosegment is given. 2186 memset(buf, 0, size); 2187 2188 // Write a header. 2189 write32(buf, nBuckets); 2190 write32(buf + 4, getPartition().dynSymTab->getNumSymbols() - symbols.size()); 2191 write32(buf + 8, maskWords); 2192 write32(buf + 12, Shift2); 2193 buf += 16; 2194 2195 // Write a bloom filter and a hash table. 2196 writeBloomFilter(buf); 2197 buf += config->wordsize * maskWords; 2198 writeHashTable(buf); 2199 } 2200 2201 // This function writes a 2-bit bloom filter. This bloom filter alone 2202 // usually filters out 80% or more of all symbol lookups [1]. 2203 // The dynamic linker uses the hash table only when a symbol is not 2204 // filtered out by a bloom filter. 2205 // 2206 // [1] Ulrich Drepper (2011), "How To Write Shared Libraries" (Ver. 4.1.2), 2207 // p.9, https://www.akkadia.org/drepper/dsohowto.pdf 2208 void GnuHashTableSection::writeBloomFilter(uint8_t *buf) { 2209 unsigned c = config->is64 ? 64 : 32; 2210 for (const Entry &sym : symbols) { 2211 // When C = 64, we choose a word with bits [6:...] and set 1 to two bits in 2212 // the word using bits [0:5] and [26:31]. 2213 size_t i = (sym.hash / c) & (maskWords - 1); 2214 uint64_t val = readUint(buf + i * config->wordsize); 2215 val |= uint64_t(1) << (sym.hash % c); 2216 val |= uint64_t(1) << ((sym.hash >> Shift2) % c); 2217 writeUint(buf + i * config->wordsize, val); 2218 } 2219 } 2220 2221 void GnuHashTableSection::writeHashTable(uint8_t *buf) { 2222 uint32_t *buckets = reinterpret_cast<uint32_t *>(buf); 2223 uint32_t oldBucket = -1; 2224 uint32_t *values = buckets + nBuckets; 2225 for (auto i = symbols.begin(), e = symbols.end(); i != e; ++i) { 2226 // Write a hash value. It represents a sequence of chains that share the 2227 // same hash modulo value. The last element of each chain is terminated by 2228 // LSB 1. 2229 uint32_t hash = i->hash; 2230 bool isLastInChain = (i + 1) == e || i->bucketIdx != (i + 1)->bucketIdx; 2231 hash = isLastInChain ? hash | 1 : hash & ~1; 2232 write32(values++, hash); 2233 2234 if (i->bucketIdx == oldBucket) 2235 continue; 2236 // Write a hash bucket. Hash buckets contain indices in the following hash 2237 // value table. 
    write32(buckets + i->bucketIdx,
            getPartition().dynSymTab->getSymbolIndex(i->sym));
    oldBucket = i->bucketIdx;
  }
}

static uint32_t hashGnu(StringRef name) {
  uint32_t h = 5381;
  for (uint8_t c : name)
    h = (h << 5) + h + c;
  return h;
}

// Add symbols to this symbol hash table. Note that this function
// destructively sorts a given vector -- which is needed because the
// GNU-style hash table imposes some sorting requirements.
void GnuHashTableSection::addSymbols(std::vector<SymbolTableEntry> &v) {
  // We cannot use 'auto' for Mid because GCC 6.1 cannot deduce
  // its type correctly.
  std::vector<SymbolTableEntry>::iterator mid =
      std::stable_partition(v.begin(), v.end(), [&](const SymbolTableEntry &s) {
        return !s.sym->isDefined() || s.sym->partition != partition;
      });

  // We chose load factor 4 for the on-disk hash table. For each hash
  // collision, the dynamic linker will compare a uint32_t hash value.
  // Since the integer comparison is quite fast, we believe we can
  // make the load factor even larger. 4 is just a conservative choice.
  //
  // Note that we don't want to create a zero-sized hash table because
  // the Android loader as of 2018 doesn't like a .gnu.hash containing
  // such a table. If that's the case, we create a hash table with one
  // unused dummy slot.
  nBuckets = std::max<size_t>((v.end() - mid) / 4, 1);

  if (mid == v.end())
    return;

  for (SymbolTableEntry &ent : llvm::make_range(mid, v.end())) {
    Symbol *b = ent.sym;
    uint32_t hash = hashGnu(b->getName());
    uint32_t bucketIdx = hash % nBuckets;
    symbols.push_back({b, ent.strTabOffset, hash, bucketIdx});
  }

  llvm::stable_sort(symbols, [](const Entry &l, const Entry &r) {
    return l.bucketIdx < r.bucketIdx;
  });

  v.erase(mid, v.end());
  for (const Entry &ent : symbols)
    v.push_back({ent.sym, ent.strTabOffset});
}

HashTableSection::HashTableSection()
    : SyntheticSection(SHF_ALLOC, SHT_HASH, 4, ".hash") {
  this->entsize = 4;
}

void HashTableSection::finalizeContents() {
  SymbolTableBaseSection *symTab = getPartition().dynSymTab;

  if (OutputSection *sec = symTab->getParent())
    getParent()->link = sec->sectionIndex;

  unsigned numEntries = 2;               // nbucket and nchain.
  numEntries += symTab->getNumSymbols(); // The chain entries.

  // Create as many buckets as there are symbols.
  numEntries += symTab->getNumSymbols();
  this->size = numEntries * 4;
}

void HashTableSection::writeTo(uint8_t *buf) {
  SymbolTableBaseSection *symTab = getPartition().dynSymTab;

  // See comment in GnuHashTableSection::writeTo.
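  // For reference, the classic SysV hash table written below is simply:
  //   uint32_t nbucket, nchain;
  //   uint32_t buckets[nbucket];
  //   uint32_t chains[nchain];
  // We create one bucket and one chain slot per dynamic symbol.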
  memset(buf, 0, size);

  unsigned numSymbols = symTab->getNumSymbols();

  uint32_t *p = reinterpret_cast<uint32_t *>(buf);
  write32(p++, numSymbols); // nbucket
  write32(p++, numSymbols); // nchain

  uint32_t *buckets = p;
  uint32_t *chains = p + numSymbols;

  for (const SymbolTableEntry &s : symTab->getSymbols()) {
    Symbol *sym = s.sym;
    StringRef name = sym->getName();
    unsigned i = sym->dynsymIndex;
    uint32_t hash = hashSysV(name) % numSymbols;
    chains[i] = buckets[hash];
    write32(buckets + hash, i);
  }
}

// On PowerPC64 the lazy symbol resolvers go into the `global linkage table`
// in the .glink section, rather than the typical .plt section.
PltSection::PltSection(bool isIplt)
    : SyntheticSection(
          SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16,
          (config->emachine == EM_PPC || config->emachine == EM_PPC64)
              ? ".glink"
              : ".plt"),
      headerSize(!isIplt || config->zRetpolineplt ? target->pltHeaderSize : 0),
      isIplt(isIplt) {
  // The PLT needs to be writable on SPARC as the dynamic linker will
  // modify the instructions in the PLT entries.
  if (config->emachine == EM_SPARCV9)
    this->flags |= SHF_WRITE;
}

void PltSection::writeTo(uint8_t *buf) {
  if (config->emachine == EM_PPC) {
    writePPC32GlinkSection(buf, entries.size());
    return;
  }

  // At the beginning of the PLT or a retpoline IPLT, we have code to call the
  // dynamic linker to resolve dynsyms at runtime. Write such code.
  if (headerSize)
    target->writePltHeader(buf);
  size_t off = headerSize;

  RelocationBaseSection *relSec = isIplt ? in.relaIplt : in.relaPlt;

  // The IPlt is immediately after the Plt; account for this in relOff.
  size_t pltOff = isIplt ? in.plt->getSize() : 0;

  for (size_t i = 0, e = entries.size(); i != e; ++i) {
    const Symbol *b = entries[i];
    unsigned relOff = relSec->entsize * i + pltOff;
    uint64_t got = b->getGotPltVA();
    uint64_t plt = this->getVA() + off;
    target->writePlt(buf + off, got, plt, b->pltIndex, relOff);
    off += target->pltEntrySize;
  }
}

template <class ELFT> void PltSection::addEntry(Symbol &sym) {
  sym.pltIndex = entries.size();
  entries.push_back(&sym);
}

size_t PltSection::getSize() const {
  return headerSize + entries.size() * target->pltEntrySize;
}

// Some architectures place additional symbols in the PLT section. For
// example, ARM uses mapping symbols to aid disassembly.
void PltSection::addSymbols() {
  // The PLT may have symbols defined for the header; the IPLT has no header.
  if (!isIplt)
    target->addPltHeaderSymbols(*this);

  size_t off = headerSize;
  for (size_t i = 0; i < entries.size(); ++i) {
    target->addPltSymbols(*this, off);
    off += target->pltEntrySize;
  }
}

// The string hash function for .gdb_index.
static uint32_t computeGdbHash(StringRef s) {
  uint32_t h = 0;
  for (uint8_t c : s)
    h = h * 67 + toLower(c) - 113;
  return h;
}

GdbIndexSection::GdbIndexSection()
    : SyntheticSection(0, SHT_PROGBITS, 1, ".gdb_index") {}

// Returns the desired size of an on-disk hash table for a .gdb_index section.
// There's a tradeoff between size and collision rate. We aim for 75%
// utilization.
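// As a (hypothetical) example, 96,000 symbols yield
// NextPowerOf2(96,000 * 4 / 3) = 131,072 slots, i.e. roughly 73% utilization;
// the lower bound of 1024 avoids tiny, collision-prone tables.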
2415 size_t GdbIndexSection::computeSymtabSize() const { 2416 return std::max<size_t>(NextPowerOf2(symbols.size() * 4 / 3), 1024); 2417 } 2418 2419 // Compute the output section size. 2420 void GdbIndexSection::initOutputSize() { 2421 size = sizeof(GdbIndexHeader) + computeSymtabSize() * 8; 2422 2423 for (GdbChunk &chunk : chunks) 2424 size += chunk.compilationUnits.size() * 16 + chunk.addressAreas.size() * 20; 2425 2426 // Add the constant pool size if exists. 2427 if (!symbols.empty()) { 2428 GdbSymbol &sym = symbols.back(); 2429 size += sym.nameOff + sym.name.size() + 1; 2430 } 2431 } 2432 2433 static std::vector<InputSection *> getDebugInfoSections() { 2434 std::vector<InputSection *> ret; 2435 for (InputSectionBase *s : inputSections) 2436 if (InputSection *isec = dyn_cast<InputSection>(s)) 2437 if (isec->name == ".debug_info") 2438 ret.push_back(isec); 2439 return ret; 2440 } 2441 2442 static std::vector<GdbIndexSection::CuEntry> readCuList(DWARFContext &dwarf) { 2443 std::vector<GdbIndexSection::CuEntry> ret; 2444 for (std::unique_ptr<DWARFUnit> &cu : dwarf.compile_units()) 2445 ret.push_back({cu->getOffset(), cu->getLength() + 4}); 2446 return ret; 2447 } 2448 2449 static std::vector<GdbIndexSection::AddressEntry> 2450 readAddressAreas(DWARFContext &dwarf, InputSection *sec) { 2451 std::vector<GdbIndexSection::AddressEntry> ret; 2452 2453 uint32_t cuIdx = 0; 2454 for (std::unique_ptr<DWARFUnit> &cu : dwarf.compile_units()) { 2455 Expected<DWARFAddressRangesVector> ranges = cu->collectAddressRanges(); 2456 if (!ranges) { 2457 error(toString(sec) + ": " + toString(ranges.takeError())); 2458 return {}; 2459 } 2460 2461 ArrayRef<InputSectionBase *> sections = sec->file->getSections(); 2462 for (DWARFAddressRange &r : *ranges) { 2463 if (r.SectionIndex == -1ULL) 2464 continue; 2465 InputSectionBase *s = sections[r.SectionIndex]; 2466 if (!s || s == &InputSection::discarded || !s->isLive()) 2467 continue; 2468 // Range list with zero size has no effect. 2469 if (r.LowPC == r.HighPC) 2470 continue; 2471 auto *isec = cast<InputSection>(s); 2472 uint64_t offset = isec->getOffsetInFile(); 2473 ret.push_back({isec, r.LowPC - offset, r.HighPC - offset, cuIdx}); 2474 } 2475 ++cuIdx; 2476 } 2477 2478 return ret; 2479 } 2480 2481 template <class ELFT> 2482 static std::vector<GdbIndexSection::NameAttrEntry> 2483 readPubNamesAndTypes(const LLDDwarfObj<ELFT> &obj, 2484 const std::vector<GdbIndexSection::CuEntry> &cUs) { 2485 const DWARFSection &pubNames = obj.getGnuPubNamesSection(); 2486 const DWARFSection &pubTypes = obj.getGnuPubTypesSection(); 2487 2488 std::vector<GdbIndexSection::NameAttrEntry> ret; 2489 for (const DWARFSection *pub : {&pubNames, &pubTypes}) { 2490 DWARFDebugPubTable table(obj, *pub, config->isLE, true); 2491 for (const DWARFDebugPubTable::Set &set : table.getData()) { 2492 // The value written into the constant pool is kind << 24 | cuIndex. As we 2493 // don't know how many compilation units precede this object to compute 2494 // cuIndex, we compute (kind << 24 | cuIndexInThisObject) instead, and add 2495 // the number of preceding compilation units later. 
2496 uint32_t i = 2497 lower_bound(cUs, set.Offset, 2498 [](GdbIndexSection::CuEntry cu, uint32_t offset) { 2499 return cu.cuOffset < offset; 2500 }) - 2501 cUs.begin(); 2502 for (const DWARFDebugPubTable::Entry &ent : set.Entries) 2503 ret.push_back({{ent.Name, computeGdbHash(ent.Name)}, 2504 (ent.Descriptor.toBits() << 24) | i}); 2505 } 2506 } 2507 return ret; 2508 } 2509 2510 // Create a list of symbols from a given list of symbol names and types 2511 // by uniquifying them by name. 2512 static std::vector<GdbIndexSection::GdbSymbol> 2513 createSymbols(ArrayRef<std::vector<GdbIndexSection::NameAttrEntry>> nameAttrs, 2514 const std::vector<GdbIndexSection::GdbChunk> &chunks) { 2515 using GdbSymbol = GdbIndexSection::GdbSymbol; 2516 using NameAttrEntry = GdbIndexSection::NameAttrEntry; 2517 2518 // For each chunk, compute the number of compilation units preceding it. 2519 uint32_t cuIdx = 0; 2520 std::vector<uint32_t> cuIdxs(chunks.size()); 2521 for (uint32_t i = 0, e = chunks.size(); i != e; ++i) { 2522 cuIdxs[i] = cuIdx; 2523 cuIdx += chunks[i].compilationUnits.size(); 2524 } 2525 2526 // The number of symbols we will handle in this function is of the order 2527 // of millions for very large executables, so we use multi-threading to 2528 // speed it up. 2529 size_t numShards = 32; 2530 size_t concurrency = 1; 2531 if (threadsEnabled) 2532 concurrency = 2533 std::min<size_t>(PowerOf2Floor(hardware_concurrency()), numShards); 2534 2535 // A sharded map to uniquify symbols by name. 2536 std::vector<DenseMap<CachedHashStringRef, size_t>> map(numShards); 2537 size_t shift = 32 - countTrailingZeros(numShards); 2538 2539 // Instantiate GdbSymbols while uniqufying them by name. 2540 std::vector<std::vector<GdbSymbol>> symbols(numShards); 2541 parallelForEachN(0, concurrency, [&](size_t threadId) { 2542 uint32_t i = 0; 2543 for (ArrayRef<NameAttrEntry> entries : nameAttrs) { 2544 for (const NameAttrEntry &ent : entries) { 2545 size_t shardId = ent.name.hash() >> shift; 2546 if ((shardId & (concurrency - 1)) != threadId) 2547 continue; 2548 2549 uint32_t v = ent.cuIndexAndAttrs + cuIdxs[i]; 2550 size_t &idx = map[shardId][ent.name]; 2551 if (idx) { 2552 symbols[shardId][idx - 1].cuVector.push_back(v); 2553 continue; 2554 } 2555 2556 idx = symbols[shardId].size() + 1; 2557 symbols[shardId].push_back({ent.name, {v}, 0, 0}); 2558 } 2559 ++i; 2560 } 2561 }); 2562 2563 size_t numSymbols = 0; 2564 for (ArrayRef<GdbSymbol> v : symbols) 2565 numSymbols += v.size(); 2566 2567 // The return type is a flattened vector, so we'll copy each vector 2568 // contents to Ret. 2569 std::vector<GdbSymbol> ret; 2570 ret.reserve(numSymbols); 2571 for (std::vector<GdbSymbol> &vec : symbols) 2572 for (GdbSymbol &sym : vec) 2573 ret.push_back(std::move(sym)); 2574 2575 // CU vectors and symbol names are adjacent in the output file. 2576 // We can compute their offsets in the output file now. 2577 size_t off = 0; 2578 for (GdbSymbol &sym : ret) { 2579 sym.cuVectorOff = off; 2580 off += (sym.cuVector.size() + 1) * 4; 2581 } 2582 for (GdbSymbol &sym : ret) { 2583 sym.nameOff = off; 2584 off += sym.name.size() + 1; 2585 } 2586 2587 return ret; 2588 } 2589 2590 // Returns a newly-created .gdb_index section. 2591 template <class ELFT> GdbIndexSection *GdbIndexSection::create() { 2592 std::vector<InputSection *> sections = getDebugInfoSections(); 2593 2594 // .debug_gnu_pub{names,types} are useless in executables. 2595 // They are present in input object files solely for creating 2596 // a .gdb_index. 
So we can remove them from the output. 2597 for (InputSectionBase *s : inputSections) 2598 if (s->name == ".debug_gnu_pubnames" || s->name == ".debug_gnu_pubtypes") 2599 s->markDead(); 2600 2601 std::vector<GdbChunk> chunks(sections.size()); 2602 std::vector<std::vector<NameAttrEntry>> nameAttrs(sections.size()); 2603 2604 parallelForEachN(0, sections.size(), [&](size_t i) { 2605 ObjFile<ELFT> *file = sections[i]->getFile<ELFT>(); 2606 DWARFContext dwarf(make_unique<LLDDwarfObj<ELFT>>(file)); 2607 2608 chunks[i].sec = sections[i]; 2609 chunks[i].compilationUnits = readCuList(dwarf); 2610 chunks[i].addressAreas = readAddressAreas(dwarf, sections[i]); 2611 nameAttrs[i] = readPubNamesAndTypes<ELFT>( 2612 static_cast<const LLDDwarfObj<ELFT> &>(dwarf.getDWARFObj()), 2613 chunks[i].compilationUnits); 2614 }); 2615 2616 auto *ret = make<GdbIndexSection>(); 2617 ret->chunks = std::move(chunks); 2618 ret->symbols = createSymbols(nameAttrs, ret->chunks); 2619 ret->initOutputSize(); 2620 return ret; 2621 } 2622 2623 void GdbIndexSection::writeTo(uint8_t *buf) { 2624 // Write the header. 2625 auto *hdr = reinterpret_cast<GdbIndexHeader *>(buf); 2626 uint8_t *start = buf; 2627 hdr->version = 7; 2628 buf += sizeof(*hdr); 2629 2630 // Write the CU list. 2631 hdr->cuListOff = buf - start; 2632 for (GdbChunk &chunk : chunks) { 2633 for (CuEntry &cu : chunk.compilationUnits) { 2634 write64le(buf, chunk.sec->outSecOff + cu.cuOffset); 2635 write64le(buf + 8, cu.cuLength); 2636 buf += 16; 2637 } 2638 } 2639 2640 // Write the address area. 2641 hdr->cuTypesOff = buf - start; 2642 hdr->addressAreaOff = buf - start; 2643 uint32_t cuOff = 0; 2644 for (GdbChunk &chunk : chunks) { 2645 for (AddressEntry &e : chunk.addressAreas) { 2646 uint64_t baseAddr = e.section->getVA(0); 2647 write64le(buf, baseAddr + e.lowAddress); 2648 write64le(buf + 8, baseAddr + e.highAddress); 2649 write32le(buf + 16, e.cuIndex + cuOff); 2650 buf += 20; 2651 } 2652 cuOff += chunk.compilationUnits.size(); 2653 } 2654 2655 // Write the on-disk open-addressing hash table containing symbols. 2656 hdr->symtabOff = buf - start; 2657 size_t symtabSize = computeSymtabSize(); 2658 uint32_t mask = symtabSize - 1; 2659 2660 for (GdbSymbol &sym : symbols) { 2661 uint32_t h = sym.name.hash(); 2662 uint32_t i = h & mask; 2663 uint32_t step = ((h * 17) & mask) | 1; 2664 2665 while (read32le(buf + i * 8)) 2666 i = (i + step) & mask; 2667 2668 write32le(buf + i * 8, sym.nameOff); 2669 write32le(buf + i * 8 + 4, sym.cuVectorOff); 2670 } 2671 2672 buf += symtabSize * 8; 2673 2674 // Write the string pool. 2675 hdr->constantPoolOff = buf - start; 2676 parallelForEach(symbols, [&](GdbSymbol &sym) { 2677 memcpy(buf + sym.nameOff, sym.name.data(), sym.name.size()); 2678 }); 2679 2680 // Write the CU vectors. 2681 for (GdbSymbol &sym : symbols) { 2682 write32le(buf, sym.cuVector.size()); 2683 buf += 4; 2684 for (uint32_t val : sym.cuVector) { 2685 write32le(buf, val); 2686 buf += 4; 2687 } 2688 } 2689 } 2690 2691 bool GdbIndexSection::isNeeded() const { return !chunks.empty(); } 2692 2693 EhFrameHeader::EhFrameHeader() 2694 : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".eh_frame_hdr") {} 2695 2696 void EhFrameHeader::writeTo(uint8_t *buf) { 2697 // Unlike most sections, the EhFrameHeader section is written while writing 2698 // another section, namely EhFrameSection, which calls the write() function 2699 // below from its writeTo() function. 
This is necessary because the contents 2700 // of EhFrameHeader depend on the relocated contents of EhFrameSection and we 2701 // don't know which order the sections will be written in. 2702 } 2703 2704 // .eh_frame_hdr contains a binary search table of pointers to FDEs. 2705 // Each entry of the search table consists of two values, 2706 // the starting PC from where FDEs covers, and the FDE's address. 2707 // It is sorted by PC. 2708 void EhFrameHeader::write() { 2709 uint8_t *buf = Out::bufferStart + getParent()->offset + outSecOff; 2710 using FdeData = EhFrameSection::FdeData; 2711 2712 std::vector<FdeData> fdes = getPartition().ehFrame->getFdeData(); 2713 2714 buf[0] = 1; 2715 buf[1] = DW_EH_PE_pcrel | DW_EH_PE_sdata4; 2716 buf[2] = DW_EH_PE_udata4; 2717 buf[3] = DW_EH_PE_datarel | DW_EH_PE_sdata4; 2718 write32(buf + 4, 2719 getPartition().ehFrame->getParent()->addr - this->getVA() - 4); 2720 write32(buf + 8, fdes.size()); 2721 buf += 12; 2722 2723 for (FdeData &fde : fdes) { 2724 write32(buf, fde.pcRel); 2725 write32(buf + 4, fde.fdeVARel); 2726 buf += 8; 2727 } 2728 } 2729 2730 size_t EhFrameHeader::getSize() const { 2731 // .eh_frame_hdr has a 12 bytes header followed by an array of FDEs. 2732 return 12 + getPartition().ehFrame->numFdes * 8; 2733 } 2734 2735 bool EhFrameHeader::isNeeded() const { 2736 return isLive() && getPartition().ehFrame->isNeeded(); 2737 } 2738 2739 VersionDefinitionSection::VersionDefinitionSection() 2740 : SyntheticSection(SHF_ALLOC, SHT_GNU_verdef, sizeof(uint32_t), 2741 ".gnu.version_d") {} 2742 2743 StringRef VersionDefinitionSection::getFileDefName() { 2744 if (!getPartition().name.empty()) 2745 return getPartition().name; 2746 if (!config->soName.empty()) 2747 return config->soName; 2748 return config->outputFile; 2749 } 2750 2751 void VersionDefinitionSection::finalizeContents() { 2752 fileDefNameOff = getPartition().dynStrTab->addString(getFileDefName()); 2753 for (VersionDefinition &v : config->versionDefinitions) 2754 verDefNameOffs.push_back(getPartition().dynStrTab->addString(v.name)); 2755 2756 if (OutputSection *sec = getPartition().dynStrTab->getParent()) 2757 getParent()->link = sec->sectionIndex; 2758 2759 // sh_info should be set to the number of definitions. This fact is missed in 2760 // documentation, but confirmed by binutils community: 2761 // https://sourceware.org/ml/binutils/2014-11/msg00355.html 2762 getParent()->info = getVerDefNum(); 2763 } 2764 2765 void VersionDefinitionSection::writeOne(uint8_t *buf, uint32_t index, 2766 StringRef name, size_t nameOff) { 2767 uint16_t flags = index == 1 ? VER_FLG_BASE : 0; 2768 2769 // Write a verdef. 2770 write16(buf, 1); // vd_version 2771 write16(buf + 2, flags); // vd_flags 2772 write16(buf + 4, index); // vd_ndx 2773 write16(buf + 6, 1); // vd_cnt 2774 write32(buf + 8, hashSysV(name)); // vd_hash 2775 write32(buf + 12, 20); // vd_aux 2776 write32(buf + 16, 28); // vd_next 2777 2778 // Write a veraux. 2779 write32(buf + 20, nameOff); // vda_name 2780 write32(buf + 24, 0); // vda_next 2781 } 2782 2783 void VersionDefinitionSection::writeTo(uint8_t *buf) { 2784 writeOne(buf, 1, getFileDefName(), fileDefNameOff); 2785 2786 auto nameOffIt = verDefNameOffs.begin(); 2787 for (VersionDefinition &v : config->versionDefinitions) { 2788 buf += EntrySize; 2789 writeOne(buf, v.id, v.name, *nameOffIt++); 2790 } 2791 2792 // Need to terminate the last version definition. 
2793 write32(buf + 16, 0); // vd_next 2794 } 2795 2796 size_t VersionDefinitionSection::getSize() const { 2797 return EntrySize * getVerDefNum(); 2798 } 2799 2800 // .gnu.version is a table where each entry is 2 byte long. 2801 VersionTableSection::VersionTableSection() 2802 : SyntheticSection(SHF_ALLOC, SHT_GNU_versym, sizeof(uint16_t), 2803 ".gnu.version") { 2804 this->entsize = 2; 2805 } 2806 2807 void VersionTableSection::finalizeContents() { 2808 // At the moment of june 2016 GNU docs does not mention that sh_link field 2809 // should be set, but Sun docs do. Also readelf relies on this field. 2810 getParent()->link = getPartition().dynSymTab->getParent()->sectionIndex; 2811 } 2812 2813 size_t VersionTableSection::getSize() const { 2814 return (getPartition().dynSymTab->getSymbols().size() + 1) * 2; 2815 } 2816 2817 void VersionTableSection::writeTo(uint8_t *buf) { 2818 buf += 2; 2819 for (const SymbolTableEntry &s : getPartition().dynSymTab->getSymbols()) { 2820 write16(buf, s.sym->versionId); 2821 buf += 2; 2822 } 2823 } 2824 2825 bool VersionTableSection::isNeeded() const { 2826 return getPartition().verDef || getPartition().verNeed->isNeeded(); 2827 } 2828 2829 void elf::addVerneed(Symbol *ss) { 2830 auto &file = cast<SharedFile>(*ss->file); 2831 if (ss->verdefIndex == VER_NDX_GLOBAL) { 2832 ss->versionId = VER_NDX_GLOBAL; 2833 return; 2834 } 2835 2836 if (file.vernauxs.empty()) 2837 file.vernauxs.resize(file.verdefs.size()); 2838 2839 // Select a version identifier for the vernaux data structure, if we haven't 2840 // already allocated one. The verdef identifiers cover the range 2841 // [1..getVerDefNum()]; this causes the vernaux identifiers to start from 2842 // getVerDefNum()+1. 2843 if (file.vernauxs[ss->verdefIndex] == 0) 2844 file.vernauxs[ss->verdefIndex] = ++SharedFile::vernauxNum + getVerDefNum(); 2845 2846 ss->versionId = file.vernauxs[ss->verdefIndex]; 2847 } 2848 2849 template <class ELFT> 2850 VersionNeedSection<ELFT>::VersionNeedSection() 2851 : SyntheticSection(SHF_ALLOC, SHT_GNU_verneed, sizeof(uint32_t), 2852 ".gnu.version_r") {} 2853 2854 template <class ELFT> void VersionNeedSection<ELFT>::finalizeContents() { 2855 for (SharedFile *f : sharedFiles) { 2856 if (f->vernauxs.empty()) 2857 continue; 2858 verneeds.emplace_back(); 2859 Verneed &vn = verneeds.back(); 2860 vn.nameStrTab = getPartition().dynStrTab->addString(f->soName); 2861 for (unsigned i = 0; i != f->vernauxs.size(); ++i) { 2862 if (f->vernauxs[i] == 0) 2863 continue; 2864 auto *verdef = 2865 reinterpret_cast<const typename ELFT::Verdef *>(f->verdefs[i]); 2866 vn.vernauxs.push_back( 2867 {verdef->vd_hash, f->vernauxs[i], 2868 getPartition().dynStrTab->addString(f->getStringTable().data() + 2869 verdef->getAux()->vda_name)}); 2870 } 2871 } 2872 2873 if (OutputSection *sec = getPartition().dynStrTab->getParent()) 2874 getParent()->link = sec->sectionIndex; 2875 getParent()->info = verneeds.size(); 2876 } 2877 2878 template <class ELFT> void VersionNeedSection<ELFT>::writeTo(uint8_t *buf) { 2879 // The Elf_Verneeds need to appear first, followed by the Elf_Vernauxs. 2880 auto *verneed = reinterpret_cast<Elf_Verneed *>(buf); 2881 auto *vernaux = reinterpret_cast<Elf_Vernaux *>(verneed + verneeds.size()); 2882 2883 for (auto &vn : verneeds) { 2884 // Create an Elf_Verneed for this DSO. 
2885 verneed->vn_version = 1; 2886 verneed->vn_cnt = vn.vernauxs.size(); 2887 verneed->vn_file = vn.nameStrTab; 2888 verneed->vn_aux = 2889 reinterpret_cast<char *>(vernaux) - reinterpret_cast<char *>(verneed); 2890 verneed->vn_next = sizeof(Elf_Verneed); 2891 ++verneed; 2892 2893 // Create the Elf_Vernauxs for this Elf_Verneed. 2894 for (auto &vna : vn.vernauxs) { 2895 vernaux->vna_hash = vna.hash; 2896 vernaux->vna_flags = 0; 2897 vernaux->vna_other = vna.verneedIndex; 2898 vernaux->vna_name = vna.nameStrTab; 2899 vernaux->vna_next = sizeof(Elf_Vernaux); 2900 ++vernaux; 2901 } 2902 2903 vernaux[-1].vna_next = 0; 2904 } 2905 verneed[-1].vn_next = 0; 2906 } 2907 2908 template <class ELFT> size_t VersionNeedSection<ELFT>::getSize() const { 2909 return verneeds.size() * sizeof(Elf_Verneed) + 2910 SharedFile::vernauxNum * sizeof(Elf_Vernaux); 2911 } 2912 2913 template <class ELFT> bool VersionNeedSection<ELFT>::isNeeded() const { 2914 return SharedFile::vernauxNum != 0; 2915 } 2916 2917 void MergeSyntheticSection::addSection(MergeInputSection *ms) { 2918 ms->parent = this; 2919 sections.push_back(ms); 2920 assert(alignment == ms->alignment || !(ms->flags & SHF_STRINGS)); 2921 alignment = std::max(alignment, ms->alignment); 2922 } 2923 2924 MergeTailSection::MergeTailSection(StringRef name, uint32_t type, 2925 uint64_t flags, uint32_t alignment) 2926 : MergeSyntheticSection(name, type, flags, alignment), 2927 builder(StringTableBuilder::RAW, alignment) {} 2928 2929 size_t MergeTailSection::getSize() const { return builder.getSize(); } 2930 2931 void MergeTailSection::writeTo(uint8_t *buf) { builder.write(buf); } 2932 2933 void MergeTailSection::finalizeContents() { 2934 // Add all string pieces to the string table builder to create section 2935 // contents. 2936 for (MergeInputSection *sec : sections) 2937 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i) 2938 if (sec->pieces[i].live) 2939 builder.add(sec->getData(i)); 2940 2941 // Fix the string table content. After this, the contents will never change. 2942 builder.finalize(); 2943 2944 // finalize() fixed tail-optimized strings, so we can now get 2945 // offsets of strings. Get an offset for each string and save it 2946 // to a corresponding SectionPiece for easy access. 2947 for (MergeInputSection *sec : sections) 2948 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i) 2949 if (sec->pieces[i].live) 2950 sec->pieces[i].outputOff = builder.getOffset(sec->getData(i)); 2951 } 2952 2953 void MergeNoTailSection::writeTo(uint8_t *buf) { 2954 for (size_t i = 0; i < numShards; ++i) 2955 shards[i].write(buf + shardOffsets[i]); 2956 } 2957 2958 // This function is very hot (i.e. it can take several seconds to finish) 2959 // because sometimes the number of inputs is in an order of magnitude of 2960 // millions. So, we use multi-threading. 2961 // 2962 // For any strings S and T, we know S is not mergeable with T if S's hash 2963 // value is different from T's. If that's the case, we can safely put S and 2964 // T into different string builders without worrying about merge misses. 2965 // We do it in parallel. 2966 void MergeNoTailSection::finalizeContents() { 2967 // Initializes string table builders. 2968 for (size_t i = 0; i < numShards; ++i) 2969 shards.emplace_back(StringTableBuilder::RAW, alignment); 2970 2971 // Concurrency level. Must be a power of 2 to avoid expensive modulo 2972 // operations in the following tight loop. 
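  // (shardId & (concurrency - 1)) in the loop below is equivalent to
  // (shardId % concurrency) only because concurrency is a power of two.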
2973 size_t concurrency = 1; 2974 if (threadsEnabled) 2975 concurrency = 2976 std::min<size_t>(PowerOf2Floor(hardware_concurrency()), numShards); 2977 2978 // Add section pieces to the builders. 2979 parallelForEachN(0, concurrency, [&](size_t threadId) { 2980 for (MergeInputSection *sec : sections) { 2981 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i) { 2982 if (!sec->pieces[i].live) 2983 continue; 2984 size_t shardId = getShardId(sec->pieces[i].hash); 2985 if ((shardId & (concurrency - 1)) == threadId) 2986 sec->pieces[i].outputOff = shards[shardId].add(sec->getData(i)); 2987 } 2988 } 2989 }); 2990 2991 // Compute an in-section offset for each shard. 2992 size_t off = 0; 2993 for (size_t i = 0; i < numShards; ++i) { 2994 shards[i].finalizeInOrder(); 2995 if (shards[i].getSize() > 0) 2996 off = alignTo(off, alignment); 2997 shardOffsets[i] = off; 2998 off += shards[i].getSize(); 2999 } 3000 size = off; 3001 3002 // So far, section pieces have offsets from beginning of shards, but 3003 // we want offsets from beginning of the whole section. Fix them. 3004 parallelForEach(sections, [&](MergeInputSection *sec) { 3005 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i) 3006 if (sec->pieces[i].live) 3007 sec->pieces[i].outputOff += 3008 shardOffsets[getShardId(sec->pieces[i].hash)]; 3009 }); 3010 } 3011 3012 static MergeSyntheticSection *createMergeSynthetic(StringRef name, 3013 uint32_t type, 3014 uint64_t flags, 3015 uint32_t alignment) { 3016 bool shouldTailMerge = (flags & SHF_STRINGS) && config->optimize >= 2; 3017 if (shouldTailMerge) 3018 return make<MergeTailSection>(name, type, flags, alignment); 3019 return make<MergeNoTailSection>(name, type, flags, alignment); 3020 } 3021 3022 template <class ELFT> void elf::splitSections() { 3023 // splitIntoPieces needs to be called on each MergeInputSection 3024 // before calling finalizeContents(). 3025 parallelForEach(inputSections, [](InputSectionBase *sec) { 3026 if (auto *s = dyn_cast<MergeInputSection>(sec)) 3027 s->splitIntoPieces(); 3028 else if (auto *eh = dyn_cast<EhInputSection>(sec)) 3029 eh->split<ELFT>(); 3030 }); 3031 } 3032 3033 // This function scans over the inputsections to create mergeable 3034 // synthetic sections. 3035 // 3036 // It removes MergeInputSections from the input section array and adds 3037 // new synthetic sections at the location of the first input section 3038 // that it replaces. It then finalizes each synthetic section in order 3039 // to compute an output offset for each piece of each input section. 3040 void elf::mergeSections() { 3041 std::vector<MergeSyntheticSection *> mergeSections; 3042 for (InputSectionBase *&s : inputSections) { 3043 MergeInputSection *ms = dyn_cast<MergeInputSection>(s); 3044 if (!ms) 3045 continue; 3046 3047 // We do not want to handle sections that are not alive, so just remove 3048 // them instead of trying to merge. 3049 if (!ms->isLive()) { 3050 s = nullptr; 3051 continue; 3052 } 3053 3054 StringRef outsecName = getOutputSectionName(ms); 3055 3056 auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) { 3057 // While we could create a single synthetic section for two different 3058 // values of Entsize, it is better to take Entsize into consideration. 3059 // 3060 // With a single synthetic section no two pieces with different Entsize 3061 // could be equal, so we may as well have two sections. 3062 // 3063 // Using Entsize in here also allows us to propagate it to the synthetic 3064 // section. 
3065 // 3066 // SHF_STRINGS section with different alignments should not be merged. 3067 return sec->name == outsecName && sec->flags == ms->flags && 3068 sec->entsize == ms->entsize && 3069 (sec->alignment == ms->alignment || !(sec->flags & SHF_STRINGS)); 3070 }); 3071 if (i == mergeSections.end()) { 3072 MergeSyntheticSection *syn = 3073 createMergeSynthetic(outsecName, ms->type, ms->flags, ms->alignment); 3074 mergeSections.push_back(syn); 3075 i = std::prev(mergeSections.end()); 3076 s = syn; 3077 syn->entsize = ms->entsize; 3078 } else { 3079 s = nullptr; 3080 } 3081 (*i)->addSection(ms); 3082 } 3083 for (auto *ms : mergeSections) 3084 ms->finalizeContents(); 3085 3086 std::vector<InputSectionBase *> &v = inputSections; 3087 v.erase(std::remove(v.begin(), v.end(), nullptr), v.end()); 3088 } 3089 3090 MipsRldMapSection::MipsRldMapSection() 3091 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, config->wordsize, 3092 ".rld_map") {} 3093 3094 ARMExidxSyntheticSection::ARMExidxSyntheticSection() 3095 : SyntheticSection(SHF_ALLOC | SHF_LINK_ORDER, SHT_ARM_EXIDX, 3096 config->wordsize, ".ARM.exidx") {} 3097 3098 static InputSection *findExidxSection(InputSection *isec) { 3099 for (InputSection *d : isec->dependentSections) 3100 if (d->type == SHT_ARM_EXIDX) 3101 return d; 3102 return nullptr; 3103 } 3104 3105 bool ARMExidxSyntheticSection::addSection(InputSection *isec) { 3106 if (isec->type == SHT_ARM_EXIDX) { 3107 exidxSections.push_back(isec); 3108 return true; 3109 } 3110 3111 if ((isec->flags & SHF_ALLOC) && (isec->flags & SHF_EXECINSTR) && 3112 isec->getSize() > 0) { 3113 executableSections.push_back(isec); 3114 if (empty && findExidxSection(isec)) 3115 empty = false; 3116 return false; 3117 } 3118 3119 // FIXME: we do not output a relocation section when --emit-relocs is used 3120 // as we do not have relocation sections for linker generated table entries 3121 // and we would have to erase at a late stage relocations from merged entries. 3122 // Given that exception tables are already position independent and a binary 3123 // analyzer could derive the relocations we choose to erase the relocations. 3124 if (config->emitRelocs && isec->type == SHT_REL) 3125 if (InputSectionBase *ex = isec->getRelocatedSection()) 3126 if (isa<InputSection>(ex) && ex->type == SHT_ARM_EXIDX) 3127 return true; 3128 3129 return false; 3130 } 3131 3132 // References to .ARM.Extab Sections have bit 31 clear and are not the 3133 // special EXIDX_CANTUNWIND bit-pattern. 3134 static bool isExtabRef(uint32_t unwind) { 3135 return (unwind & 0x80000000) == 0 && unwind != 0x1; 3136 } 3137 3138 // Return true if the .ARM.exidx section Cur can be merged into the .ARM.exidx 3139 // section Prev, where Cur follows Prev in the table. This can be done if the 3140 // unwinding instructions in Cur are identical to Prev. Linker generated 3141 // EXIDX_CANTUNWIND entries are represented by nullptr as they do not have an 3142 // InputSection. 3143 static bool isDuplicateArmExidxSec(InputSection *prev, InputSection *cur) { 3144 3145 struct ExidxEntry { 3146 ulittle32_t fn; 3147 ulittle32_t unwind; 3148 }; 3149 // Get the last table Entry from the previous .ARM.exidx section. If Prev is 3150 // nullptr then it will be a synthesized EXIDX_CANTUNWIND entry. 
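  // In each ExidxEntry, 'fn' is a PREL31 offset to the function the entry
  // describes, and 'unwind' is either the EXIDX_CANTUNWIND marker (0x1), an
  // inline unwind sequence (bit 31 set), or a PREL31 reference into
  // .ARM.extab (bit 31 clear; see isExtabRef above).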
  ExidxEntry prevEntry = {ulittle32_t(0), ulittle32_t(1)};
  if (prev)
    prevEntry = prev->getDataAs<ExidxEntry>().back();
  if (isExtabRef(prevEntry.unwind))
    return false;

  // We consider the unwind instructions of an .ARM.exidx table entry a
  // duplicate of the previous entry's unwind instructions if:
  // - Both are the special EXIDX_CANTUNWIND.
  // - Both are the same inline unwind instructions.
  // We do not attempt to follow and check links into .ARM.extab tables as
  // consecutive identical entries are rare and the effort to check that they
  // are identical is high.

  // If Cur is nullptr then this is a synthesized EXIDX_CANTUNWIND entry.
  if (cur == nullptr)
    return prevEntry.unwind == 1;

  for (const ExidxEntry entry : cur->getDataAs<ExidxEntry>())
    if (isExtabRef(entry.unwind) || entry.unwind != prevEntry.unwind)
      return false;

  // All table entries in this .ARM.exidx section can be merged into the
  // previous section.
  return true;
}

// The .ARM.exidx table must be sorted in ascending order of the address of the
// functions the table describes. Optionally, duplicate adjacent table entries
// can be removed. At the end of the function the executableSections must be
// sorted in ascending order of address, Sentinel is set to the InputSection
// with the highest address and any InputSections that have mergeable
// .ARM.exidx table entries are removed from it.
void ARMExidxSyntheticSection::finalizeContents() {
  if (script->hasSectionsCommand) {
    // The executableSections and exidxSections that we use to derive the
    // final contents of this SyntheticSection are populated before the
    // linker script assigns InputSections to OutputSections. The linker script
    // SECTIONS command may have a /DISCARD/ entry that removes executable
    // InputSections and their dependent .ARM.exidx section that we recorded
    // earlier.
    auto isDiscarded = [](const InputSection *isec) { return !isec->isLive(); };
    llvm::erase_if(executableSections, isDiscarded);
    llvm::erase_if(exidxSections, isDiscarded);
  }

  // Sort the executable sections that may or may not have associated
  // .ARM.exidx sections by order of ascending address. This requires the
  // relative positions of InputSections to be known.
  auto compareByFilePosition = [](const InputSection *a,
                                  const InputSection *b) {
    OutputSection *aOut = a->getParent();
    OutputSection *bOut = b->getParent();

    if (aOut != bOut)
      return aOut->sectionIndex < bOut->sectionIndex;
    return a->outSecOff < b->outSecOff;
  };
  llvm::stable_sort(executableSections, compareByFilePosition);
  sentinel = executableSections.back();
  // Optionally merge adjacent duplicate entries.
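  // Walk the sorted sections and keep the first of each run whose .ARM.exidx
  // entries duplicate those of the most recently kept section; the address
  // ranges of the dropped sections are then covered by the kept entry.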
  if (config->mergeArmExidx) {
    std::vector<InputSection *> selectedSections;
    selectedSections.reserve(executableSections.size());
    selectedSections.push_back(executableSections[0]);
    size_t prev = 0;
    for (size_t i = 1; i < executableSections.size(); ++i) {
      InputSection *ex1 = findExidxSection(executableSections[prev]);
      InputSection *ex2 = findExidxSection(executableSections[i]);
      if (!isDuplicateArmExidxSec(ex1, ex2)) {
        selectedSections.push_back(executableSections[i]);
        prev = i;
      }
    }
    executableSections = std::move(selectedSections);
  }

  size_t offset = 0;
  size = 0;
  for (InputSection *isec : executableSections) {
    if (InputSection *d = findExidxSection(isec)) {
      d->outSecOff = offset;
      d->parent = getParent();
      offset += d->getSize();
    } else {
      offset += 8;
    }
  }
  // Size includes Sentinel.
  size = offset + 8;
}

InputSection *ARMExidxSyntheticSection::getLinkOrderDep() const {
  return executableSections.front();
}

// To write the .ARM.exidx table from the ExecutableSections we have three
// cases:
// 1.) The InputSection has a .ARM.exidx InputSection in its dependent
//     sections. We write the .ARM.exidx section contents and apply its
//     relocations.
// 2.) The InputSection does not have a dependent .ARM.exidx InputSection. We
//     must write the contents of an EXIDX_CANTUNWIND directly. We use the
//     start of the InputSection because the purpose of the linker generated
//     entry is to terminate the address range of the previous entry.
// 3.) A trailing EXIDX_CANTUNWIND sentinel section is required at the end of
//     the table to terminate the address range of the final entry.
void ARMExidxSyntheticSection::writeTo(uint8_t *buf) {

  const uint8_t cantUnwindData[8] = {0, 0, 0, 0,  // PREL31 to target
                                     1, 0, 0, 0}; // EXIDX_CANTUNWIND

  uint64_t offset = 0;
  for (InputSection *isec : executableSections) {
    assert(isec->getParent() != nullptr);
    if (InputSection *d = findExidxSection(isec)) {
      memcpy(buf + offset, d->data().data(), d->data().size());
      d->relocateAlloc(buf, buf + d->getSize());
      offset += d->getSize();
    } else {
      // A linker generated CANTUNWIND section.
      memcpy(buf + offset, cantUnwindData, sizeof(cantUnwindData));
      uint64_t s = isec->getVA();
      uint64_t p = getVA() + offset;
      target->relocateOne(buf + offset, R_ARM_PREL31, s - p);
      offset += 8;
    }
  }
  // Write Sentinel.
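  // The sentinel is one more EXIDX_CANTUNWIND entry; its PREL31 field points
  // at the end of the highest-addressed executable section so that the final
  // table entry's address range is terminated.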
  memcpy(buf + offset, cantUnwindData, sizeof(cantUnwindData));
  uint64_t s = sentinel->getVA(sentinel->getSize());
  uint64_t p = getVA() + offset;
  target->relocateOne(buf + offset, R_ARM_PREL31, s - p);
  assert(size == offset + 8);
}

bool ARMExidxSyntheticSection::classof(const SectionBase *d) {
  return d->kind() == InputSectionBase::Synthetic && d->type == SHT_ARM_EXIDX;
}

ThunkSection::ThunkSection(OutputSection *os, uint64_t off)
    : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS,
                       config->wordsize, ".text.thunk") {
  this->parent = os;
  this->outSecOff = off;
}

void ThunkSection::addThunk(Thunk *t) {
  thunks.push_back(t);
  t->addSymbols(*this);
}

void ThunkSection::writeTo(uint8_t *buf) {
  for (Thunk *t : thunks)
    t->writeTo(buf + t->offset);
}

InputSection *ThunkSection::getTargetInputSection() const {
  if (thunks.empty())
    return nullptr;
  const Thunk *t = thunks.front();
  return t->getTargetInputSection();
}

bool ThunkSection::assignOffsets() {
  uint64_t off = 0;
  for (Thunk *t : thunks) {
    off = alignTo(off, t->alignment);
    t->setOffset(off);
    uint32_t size = t->size();
    t->getThunkTargetSym()->size = size;
    off += size;
  }
  bool changed = off != size;
  size = off;
  return changed;
}

PPC32Got2Section::PPC32Got2Section()
    : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, 4, ".got2") {}

bool PPC32Got2Section::isNeeded() const {
  // See the comment below. This is not needed if there is no other
  // InputSection.
  for (BaseCommand *base : getParent()->sectionCommands)
    if (auto *isd = dyn_cast<InputSectionDescription>(base))
      for (InputSection *isec : isd->sections)
        if (isec != this)
          return true;
  return false;
}

void PPC32Got2Section::finalizeContents() {
  // PPC32 may create multiple GOT sections for -fPIC/-fPIE, one per file in
  // .got2 . This function computes outSecOff of each .got2 to be used in
  // PPC32PltCallStub::writeTo(). The purpose of this empty synthetic section
  // is to collect input sections named ".got2".
  uint32_t offset = 0;
  for (BaseCommand *base : getParent()->sectionCommands)
    if (auto *isd = dyn_cast<InputSectionDescription>(base)) {
      for (InputSection *isec : isd->sections) {
        if (isec == this)
          continue;
        isec->file->ppc32Got2OutSecOff = offset;
        offset += (uint32_t)isec->getSize();
      }
    }
}

// If linking position-dependent code then the table will store the addresses
// directly in the binary so the section has type SHT_PROGBITS. If linking
// position-independent code the section has type SHT_NOBITS since it will be
// allocated and filled in by the dynamic linker.
PPC64LongBranchTargetSection::PPC64LongBranchTargetSection()
    : SyntheticSection(SHF_ALLOC | SHF_WRITE,
                       config->isPic ?
                           SHT_NOBITS : SHT_PROGBITS, 8,
                       ".branch_lt") {}

void PPC64LongBranchTargetSection::addEntry(Symbol &sym) {
  assert(sym.ppc64BranchltIndex == 0xffff);
  sym.ppc64BranchltIndex = entries.size();
  entries.push_back(&sym);
}

size_t PPC64LongBranchTargetSection::getSize() const {
  return entries.size() * 8;
}

void PPC64LongBranchTargetSection::writeTo(uint8_t *buf) {
  // If linking non-pic we have the final addresses of the targets and they
  // get written to the table directly. For pic the dynamic linker will
  // allocate the section and fill it in.
  if (config->isPic)
    return;

  for (const Symbol *sym : entries) {
    assert(sym->getVA());
    // Need calls to branch to the local entry-point since a long-branch
    // must be a local-call.
    write64(buf,
            sym->getVA() + getPPC64GlobalEntryToLocalEntryOffset(sym->stOther));
    buf += 8;
  }
}

bool PPC64LongBranchTargetSection::isNeeded() const {
  // `removeUnusedSyntheticSections()` is called before thunk allocation,
  // which is too early to determine if this section will be empty or not. We
  // need Finalized to keep the section alive until after thunk creation.
  // Finalized only gets set to true once `finalizeSections()` is called after
  // thunk creation. Because of this, if we don't create any long-branch
  // thunks we end up with an empty .branch_lt section in the binary.
  return !finalized || !entries.empty();
}

RISCVSdataSection::RISCVSdataSection()
    : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, 1, ".sdata") {}

bool RISCVSdataSection::isNeeded() const {
  if (!ElfSym::riscvGlobalPointer)
    return false;

  // __global_pointer$ is defined relative to .sdata . If the section does not
  // exist, create a dummy one.
  for (BaseCommand *base : getParent()->sectionCommands)
    if (auto *isd = dyn_cast<InputSectionDescription>(base))
      for (InputSection *isec : isd->sections)
        if (isec != this)
          return false;
  return true;
}

static uint8_t getAbiVersion() {
  // MIPS non-PIC executable gets ABI version 1.
  if (config->emachine == EM_MIPS) {
    if (!config->isPic && !config->relocatable &&
        (config->eflags & (EF_MIPS_PIC | EF_MIPS_CPIC)) == EF_MIPS_CPIC)
      return 1;
    return 0;
  }

  if (config->emachine == EM_AMDGPU) {
    uint8_t ver = objectFiles[0]->abiVersion;
    for (InputFile *file : makeArrayRef(objectFiles).slice(1))
      if (file->abiVersion != ver)
        error("incompatible ABI version: " + toString(file));
    return ver;
  }

  return 0;
}

template <typename ELFT> void elf::writeEhdr(uint8_t *buf, Partition &part) {
  // For executable segments, the trap instructions are written before writing
  // the header. Setting ELF header bytes to zero ensures that any unused bytes
  // in the header are zero-cleared, instead of having trap instructions.
  memset(buf, 0, sizeof(typename ELFT::Ehdr));
  memcpy(buf, "\177ELF", 4);

  auto *eHdr = reinterpret_cast<typename ELFT::Ehdr *>(buf);
  eHdr->e_ident[EI_CLASS] = config->is64 ? ELFCLASS64 : ELFCLASS32;
  eHdr->e_ident[EI_DATA] = config->isLE ?
      ELFDATA2LSB : ELFDATA2MSB;
  eHdr->e_ident[EI_VERSION] = EV_CURRENT;
  eHdr->e_ident[EI_OSABI] = config->osabi;
  eHdr->e_ident[EI_ABIVERSION] = getAbiVersion();
  eHdr->e_machine = config->emachine;
  eHdr->e_version = EV_CURRENT;
  eHdr->e_flags = config->eflags;
  eHdr->e_ehsize = sizeof(typename ELFT::Ehdr);
  eHdr->e_phnum = part.phdrs.size();
  eHdr->e_shentsize = sizeof(typename ELFT::Shdr);

  if (!config->relocatable) {
    eHdr->e_phoff = sizeof(typename ELFT::Ehdr);
    eHdr->e_phentsize = sizeof(typename ELFT::Phdr);
  }
}

template <typename ELFT> void elf::writePhdrs(uint8_t *buf, Partition &part) {
  // Write the program header table.
  auto *hBuf = reinterpret_cast<typename ELFT::Phdr *>(buf);
  for (PhdrEntry *p : part.phdrs) {
    hBuf->p_type = p->p_type;
    hBuf->p_flags = p->p_flags;
    hBuf->p_offset = p->p_offset;
    hBuf->p_vaddr = p->p_vaddr;
    hBuf->p_paddr = p->p_paddr;
    hBuf->p_filesz = p->p_filesz;
    hBuf->p_memsz = p->p_memsz;
    hBuf->p_align = p->p_align;
    ++hBuf;
  }
}

template <typename ELFT>
PartitionElfHeaderSection<ELFT>::PartitionElfHeaderSection()
    : SyntheticSection(SHF_ALLOC, SHT_LLVM_PART_EHDR, 1, "") {}

template <typename ELFT>
size_t PartitionElfHeaderSection<ELFT>::getSize() const {
  return sizeof(typename ELFT::Ehdr);
}

template <typename ELFT>
void PartitionElfHeaderSection<ELFT>::writeTo(uint8_t *buf) {
  writeEhdr<ELFT>(buf, getPartition());

  // Loadable partitions are always ET_DYN.
  auto *eHdr = reinterpret_cast<typename ELFT::Ehdr *>(buf);
  eHdr->e_type = ET_DYN;
}

template <typename ELFT>
PartitionProgramHeadersSection<ELFT>::PartitionProgramHeadersSection()
    : SyntheticSection(SHF_ALLOC, SHT_LLVM_PART_PHDR, 1, ".phdrs") {}

template <typename ELFT>
size_t PartitionProgramHeadersSection<ELFT>::getSize() const {
  return sizeof(typename ELFT::Phdr) * getPartition().phdrs.size();
}

template <typename ELFT>
void PartitionProgramHeadersSection<ELFT>::writeTo(uint8_t *buf) {
  writePhdrs<ELFT>(buf, getPartition());
}

PartitionIndexSection::PartitionIndexSection()
    : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".rodata") {}

size_t PartitionIndexSection::getSize() const {
  return 12 * (partitions.size() - 1);
}

void PartitionIndexSection::finalizeContents() {
  for (size_t i = 1; i != partitions.size(); ++i)
    partitions[i].nameStrTab = mainPart->dynStrTab->addString(partitions[i].name);
}

void PartitionIndexSection::writeTo(uint8_t *buf) {
  uint64_t va = getVA();
  for (size_t i = 1; i != partitions.size(); ++i) {
    write32(buf, mainPart->dynStrTab->getVA() + partitions[i].nameStrTab - va);
    write32(buf + 4, partitions[i].elfHeader->getVA() - (va + 4));

    SyntheticSection *next =
        i == partitions.size() - 1 ?
            in.partEnd : partitions[i + 1].elfHeader;
    write32(buf + 8, next->getVA() - partitions[i].elfHeader->getVA());

    va += 12;
    buf += 12;
  }
}

InStruct elf::in;

std::vector<Partition> elf::partitions;
Partition *elf::mainPart;

template GdbIndexSection *GdbIndexSection::create<ELF32LE>();
template GdbIndexSection *GdbIndexSection::create<ELF32BE>();
template GdbIndexSection *GdbIndexSection::create<ELF64LE>();
template GdbIndexSection *GdbIndexSection::create<ELF64BE>();

template void elf::splitSections<ELF32LE>();
template void elf::splitSections<ELF32BE>();
template void elf::splitSections<ELF64LE>();
template void elf::splitSections<ELF64BE>();

template void EhFrameSection::addSection<ELF32LE>(InputSectionBase *);
template void EhFrameSection::addSection<ELF32BE>(InputSectionBase *);
template void EhFrameSection::addSection<ELF64LE>(InputSectionBase *);
template void EhFrameSection::addSection<ELF64BE>(InputSectionBase *);

template void PltSection::addEntry<ELF32LE>(Symbol &Sym);
template void PltSection::addEntry<ELF32BE>(Symbol &Sym);
template void PltSection::addEntry<ELF64LE>(Symbol &Sym);
template void PltSection::addEntry<ELF64BE>(Symbol &Sym);

template class elf::MipsAbiFlagsSection<ELF32LE>;
template class elf::MipsAbiFlagsSection<ELF32BE>;
template class elf::MipsAbiFlagsSection<ELF64LE>;
template class elf::MipsAbiFlagsSection<ELF64BE>;

template class elf::MipsOptionsSection<ELF32LE>;
template class elf::MipsOptionsSection<ELF32BE>;
template class elf::MipsOptionsSection<ELF64LE>;
template class elf::MipsOptionsSection<ELF64BE>;

template class elf::MipsReginfoSection<ELF32LE>;
template class elf::MipsReginfoSection<ELF32BE>;
template class elf::MipsReginfoSection<ELF64LE>;
template class elf::MipsReginfoSection<ELF64BE>;

template class elf::DynamicSection<ELF32LE>;
template class elf::DynamicSection<ELF32BE>;
template class elf::DynamicSection<ELF64LE>;
template class elf::DynamicSection<ELF64BE>;

template class elf::RelocationSection<ELF32LE>;
template class elf::RelocationSection<ELF32BE>;
template class elf::RelocationSection<ELF64LE>;
template class elf::RelocationSection<ELF64BE>;

template class elf::AndroidPackedRelocationSection<ELF32LE>;
template class elf::AndroidPackedRelocationSection<ELF32BE>;
template class elf::AndroidPackedRelocationSection<ELF64LE>;
template class elf::AndroidPackedRelocationSection<ELF64BE>;

template class elf::RelrSection<ELF32LE>;
template class elf::RelrSection<ELF32BE>;
template class elf::RelrSection<ELF64LE>;
template class elf::RelrSection<ELF64BE>;

template class elf::SymbolTableSection<ELF32LE>;
template class elf::SymbolTableSection<ELF32BE>;
template class elf::SymbolTableSection<ELF64LE>;
template class elf::SymbolTableSection<ELF64BE>;

template class elf::VersionNeedSection<ELF32LE>;
template class elf::VersionNeedSection<ELF32BE>;
template class elf::VersionNeedSection<ELF64LE>;
template class elf::VersionNeedSection<ELF64BE>;

template void elf::writeEhdr<ELF32LE>(uint8_t *Buf, Partition &Part);
template void elf::writeEhdr<ELF32BE>(uint8_t *Buf, Partition &Part);
template void elf::writeEhdr<ELF64LE>(uint8_t *Buf, Partition &Part);
template
void elf::writeEhdr<ELF64BE>(uint8_t *Buf, Partition &Part);

template void elf::writePhdrs<ELF32LE>(uint8_t *Buf, Partition &Part);
template void elf::writePhdrs<ELF32BE>(uint8_t *Buf, Partition &Part);
template void elf::writePhdrs<ELF64LE>(uint8_t *Buf, Partition &Part);
template void elf::writePhdrs<ELF64BE>(uint8_t *Buf, Partition &Part);

template class elf::PartitionElfHeaderSection<ELF32LE>;
template class elf::PartitionElfHeaderSection<ELF32BE>;
template class elf::PartitionElfHeaderSection<ELF64LE>;
template class elf::PartitionElfHeaderSection<ELF64BE>;

template class elf::PartitionProgramHeadersSection<ELF32LE>;
template class elf::PartitionProgramHeadersSection<ELF32BE>;
template class elf::PartitionProgramHeadersSection<ELF64LE>;
template class elf::PartitionProgramHeadersSection<ELF64BE>;