1 //===- Relocations.cpp ----------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains platform-independent functions to process relocations. 10 // I'll describe the overview of this file here. 11 // 12 // Simple relocations are easy to handle for the linker. For example, 13 // for R_X86_64_PC64 relocs, the linker just has to fix up locations 14 // with the relative offsets to the target symbols. It would just be 15 // reading records from relocation sections and applying them to output. 16 // 17 // But not all relocations are that easy to handle. For example, for 18 // R_386_GOTOFF relocs, the linker has to create new GOT entries for 19 // symbols if they don't exist, and fix up locations with GOT entry 20 // offsets from the beginning of GOT section. So there is more than 21 // fixing addresses in relocation processing. 22 // 23 // ELF defines a large number of complex relocations. 24 // 25 // The functions in this file analyze relocations and do whatever needs 26 // to be done. It includes, but not limited to, the following. 27 // 28 // - create GOT/PLT entries 29 // - create new relocations in .dynsym to let the dynamic linker resolve 30 // them at runtime (since ELF supports dynamic linking, not all 31 // relocations can be resolved at link-time) 32 // - create COPY relocs and reserve space in .bss 33 // - replace expensive relocs (in terms of runtime cost) with cheap ones 34 // - error out infeasible combinations such as PIC and non-relative relocs 35 // 36 // Note that the functions in this file don't actually apply relocations 37 // because it doesn't know about the output file nor the output file buffer. 
// It instead stores Relocation objects to InputSection's Relocations
// vector to let it apply later in InputSection::writeTo.
//
//===----------------------------------------------------------------------===//

#include "Relocations.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Demangle/Demangle.h"
#include "llvm/Support/Endian.h"
#include <algorithm>

using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::elf;

// Append a ">>> defined in <file>" line to a diagnostic stream. Shared by
// several error reporters below.
static void printDefinedLocation(ELFSyncStream &s, const Symbol &sym) {
  s << "\n>>> defined in " << sym.file;
}

// Construct a message in the following format.
73 // 74 // >>> defined in /home/alice/src/foo.o 75 // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12) 76 // >>> /home/alice/src/bar.o:(.text+0x1) 77 static void printLocation(ELFSyncStream &s, InputSectionBase &sec, 78 const Symbol &sym, uint64_t off) { 79 printDefinedLocation(s, sym); 80 s << "\n>>> referenced by "; 81 auto tell = s.tell(); 82 s << sec.getSrcMsg(sym, off); 83 if (tell != s.tell()) 84 s << "\n>>> "; 85 s << sec.getObjMsg(off); 86 } 87 88 void elf::reportRangeError(Ctx &ctx, uint8_t *loc, const Relocation &rel, 89 const Twine &v, int64_t min, uint64_t max) { 90 ErrorPlace errPlace = getErrorPlace(ctx, loc); 91 auto diag = Err(ctx); 92 diag << errPlace.loc << "relocation " << rel.type 93 << " out of range: " << v.str() << " is not in [" << min << ", " << max 94 << ']'; 95 96 if (rel.sym) { 97 if (!rel.sym->isSection()) 98 diag << "; references '" << rel.sym << '\''; 99 else if (auto *d = dyn_cast<Defined>(rel.sym)) 100 diag << "; references section '" << d->section->name << "'"; 101 102 if (ctx.arg.emachine == EM_X86_64 && rel.type == R_X86_64_PC32 && 103 rel.sym->getOutputSection() && 104 (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) { 105 diag << "; R_X86_64_PC32 should not reference a section marked " 106 "SHF_X86_64_LARGE"; 107 } 108 } 109 if (!errPlace.srcLoc.empty()) 110 diag << "\n>>> referenced by " << errPlace.srcLoc; 111 if (rel.sym && !rel.sym->isSection()) 112 printDefinedLocation(diag, *rel.sym); 113 114 if (errPlace.isec && errPlace.isec->name.starts_with(".debug")) 115 diag << "; consider recompiling with -fdebug-types-section to reduce size " 116 "of debug sections"; 117 } 118 119 void elf::reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n, 120 const Symbol &sym, const Twine &msg) { 121 auto diag = Err(ctx); 122 diag << getErrorPlace(ctx, loc).loc << msg << " is out of range: " << v 123 << " is not in [" << llvm::minIntN(n) << ", " << llvm::maxIntN(n) << "]"; 124 if (!sym.getName().empty()) { 125 diag << "; 
references '" << &sym << '\''; 126 printDefinedLocation(diag, sym); 127 } 128 } 129 130 // Build a bitmask with one bit set for each 64 subset of RelExpr. 131 static constexpr uint64_t buildMask() { return 0; } 132 133 template <typename... Tails> 134 static constexpr uint64_t buildMask(int head, Tails... tails) { 135 return (0 <= head && head < 64 ? uint64_t(1) << head : 0) | 136 buildMask(tails...); 137 } 138 139 // Return true if `Expr` is one of `Exprs`. 140 // There are more than 64 but less than 128 RelExprs, so we divide the set of 141 // exprs into [0, 64) and [64, 128) and represent each range as a constant 142 // 64-bit mask. Then we decide which mask to test depending on the value of 143 // expr and use a simple shift and bitwise-and to test for membership. 144 template <RelExpr... Exprs> static bool oneof(RelExpr expr) { 145 assert(0 <= expr && (int)expr < 128 && 146 "RelExpr is too large for 128-bit mask!"); 147 148 if (expr >= 64) 149 return (uint64_t(1) << (expr - 64)) & buildMask((Exprs - 64)...); 150 return (uint64_t(1) << expr) & buildMask(Exprs...); 151 } 152 153 static RelType getMipsPairType(RelType type, bool isLocal) { 154 switch (type) { 155 case R_MIPS_HI16: 156 return R_MIPS_LO16; 157 case R_MIPS_GOT16: 158 // In case of global symbol, the R_MIPS_GOT16 relocation does not 159 // have a pair. Each global symbol has a unique entry in the GOT 160 // and a corresponding instruction with help of the R_MIPS_GOT16 161 // relocation loads an address of the symbol. In case of local 162 // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold 163 // the high 16 bits of the symbol's value. A paired R_MIPS_LO16 164 // relocations handle low 16 bits of the address. That allows 165 // to allocate only one GOT entry for every 64 KBytes of local data. 166 return isLocal ? R_MIPS_LO16 : R_MIPS_NONE; 167 case R_MICROMIPS_GOT16: 168 return isLocal ? 
R_MICROMIPS_LO16 : R_MIPS_NONE; 169 case R_MIPS_PCHI16: 170 return R_MIPS_PCLO16; 171 case R_MICROMIPS_HI16: 172 return R_MICROMIPS_LO16; 173 default: 174 return R_MIPS_NONE; 175 } 176 } 177 178 // True if non-preemptable symbol always has the same value regardless of where 179 // the DSO is loaded. 180 static bool isAbsolute(const Symbol &sym) { 181 if (sym.isUndefWeak()) 182 return true; 183 if (const auto *dr = dyn_cast<Defined>(&sym)) 184 return dr->section == nullptr; // Absolute symbol. 185 return false; 186 } 187 188 static bool isAbsoluteValue(const Symbol &sym) { 189 return isAbsolute(sym) || sym.isTls(); 190 } 191 192 // Returns true if Expr refers a PLT entry. 193 static bool needsPlt(RelExpr expr) { 194 return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, 195 R_GOTPLT_PC, RE_LOONGARCH_PLT_PAGE_PC, RE_PPC32_PLTREL, 196 RE_PPC64_CALL_PLT>(expr); 197 } 198 199 bool lld::elf::needsGot(RelExpr expr) { 200 return oneof<R_GOT, RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, R_GOT_OFF, 201 RE_MIPS_GOT_LOCAL_PAGE, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, 202 RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, 203 RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT, 204 RE_AARCH64_GOT_PAGE, RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>( 205 expr); 206 } 207 208 // True if this expression is of the form Sym - X, where X is a position in the 209 // file (PC, or GOT for example). 
static bool isRelExpr(RelExpr expr) {
  return oneof<R_PC, R_GOTREL, R_GOTPLTREL, RE_ARM_PCA, RE_MIPS_GOTREL,
               RE_PPC64_CALL, RE_PPC64_RELAX_TOC, RE_AARCH64_PAGE_PC,
               R_RELAX_GOT_PC, RE_RISCV_PC_INDIRECT, RE_PPC64_RELAX_GOT_PC,
               RE_LOONGARCH_PAGE_PC>(expr);
}

// Convert a direct-reference expression to its PLT-indirect counterpart.
// Expressions without a PLT form are returned unchanged.
static RelExpr toPlt(RelExpr expr) {
  switch (expr) {
  case RE_LOONGARCH_PAGE_PC:
    return RE_LOONGARCH_PLT_PAGE_PC;
  case RE_PPC64_CALL:
    return RE_PPC64_CALL_PLT;
  case R_PC:
    return R_PLT_PC;
  case R_ABS:
    return R_PLT;
  case R_GOTREL:
    return R_PLT_GOTREL;
  default:
    return expr;
  }
}

static RelExpr fromPlt(RelExpr expr) {
  // We decided not to use a plt. Optimize a reference to the plt to a
  // reference to the symbol itself.
  switch (expr) {
  case R_PLT_PC:
  case RE_PPC32_PLTREL:
    return R_PC;
  case RE_LOONGARCH_PLT_PAGE_PC:
    return RE_LOONGARCH_PAGE_PC;
  case RE_PPC64_CALL_PLT:
    return RE_PPC64_CALL;
  case R_PLT:
    return R_ABS;
  case R_PLT_GOTPLT:
    return R_GOTPLTREL;
  case R_PLT_GOTREL:
    return R_GOTREL;
  default:
    return expr;
  }
}

// Returns true if a given shared symbol is in a read-only segment in a DSO.
template <class ELFT> static bool isReadOnly(SharedSymbol &ss) {
  using Elf_Phdr = typename ELFT::Phdr;

  // Determine if the symbol is read-only by scanning the DSO's program headers.
  // PT_GNU_RELRO is writable at load time but read-only after relocation, so
  // it counts as read-only here.
  const auto &file = cast<SharedFile>(*ss.file);
  for (const Elf_Phdr &phdr :
       check(file.template getObj<ELFT>().program_headers()))
    if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) &&
        !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr &&
        ss.value < phdr.p_vaddr + phdr.p_memsz)
      return true;
  return false;
}

// Returns symbols at the same offset as a given symbol, including SS itself.
//
// If two or more symbols are at the same offset, and at least one of
// them are copied by a copy relocation, all of them need to be copied.
// Otherwise, they would refer to different places at runtime.
template <class ELFT>
static SmallSet<SharedSymbol *, 4> getSymbolsAt(Ctx &ctx, SharedSymbol &ss) {
  using Elf_Sym = typename ELFT::Sym;

  const auto &file = cast<SharedFile>(*ss.file);

  SmallSet<SharedSymbol *, 4> ret;
  for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) {
    // Skip symbols that cannot alias ss: undefined/absolute symbols, TLS
    // symbols, and symbols at a different value.
    if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS ||
        s.getType() == STT_TLS || s.st_value != ss.value)
      continue;
    StringRef name = check(s.getName(file.getStringTable()));
    Symbol *sym = ctx.symtab->find(name);
    if (auto *alias = dyn_cast_or_null<SharedSymbol>(sym))
      ret.insert(alias);
  }

  // The loop does not check SHT_GNU_verneed, so ret does not contain
  // non-default version symbols. If ss has a non-default version, ret won't
  // contain ss. Just add ss unconditionally. If a non-default version alias is
  // separately copy relocated, it and ss will have different addresses.
  // Fortunately this case is impractical and fails with GNU ld as well.
  ret.insert(&ss);
  return ret;
}

// When a symbol is copy relocated or we create a canonical plt entry, it is
// effectively a defined symbol. In the case of copy relocation the symbol is
// in .bss and in the case of a canonical plt entry it is in .plt. This function
// replaces the existing symbol with a Defined pointing to the appropriate
// location.
static void replaceWithDefined(Ctx &ctx, Symbol &sym, SectionBase &sec,
                               uint64_t value, uint64_t size) {
  Symbol old = sym;
  Defined(ctx, sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value,
          size, &sec)
      .overwrite(sym);

  sym.versionId = old.versionId;
  sym.isUsedInRegularObj = true;
  // A copy relocated alias may need a GOT entry.
  sym.flags.store(old.flags.load(std::memory_order_relaxed) & NEEDS_GOT,
                  std::memory_order_relaxed);
}

// Reserve space in .bss or .bss.rel.ro for copy relocation.
//
// The copy relocation is pretty much a hack. If you use a copy relocation
// in your program, not only the symbol name but the symbol's size, RW/RO
// bit and alignment become part of the ABI. In addition to that, if the
// symbol has aliases, the aliases become part of the ABI. That's subtle,
// but if you violate that implicit ABI, that can cause very counter-
// intuitive consequences.
//
// So, what is the copy relocation? It's for linking non-position
// independent code to DSOs. In an ideal world, all references to data
// exported by DSOs should go indirectly through GOT. But if object files
// are compiled as non-PIC, all data references are direct. There is no
// way for the linker to transform the code to use GOT, as machine
// instructions are already set in stone in object files. This is where
// the copy relocation takes a role.
//
// A copy relocation instructs the dynamic linker to copy data from a DSO
// to a specified address (which is usually in .bss) at load-time. If the
// static linker (that's us) finds a direct data reference to a DSO
// symbol, it creates a copy relocation, so that the symbol can be
// resolved as if it were in .bss rather than in a DSO.
//
// As you can see in this function, we create a copy relocation for the
// dynamic linker, and the relocation contains not only symbol name but
// various other information about the symbol. So, such attributes become a
// part of the ABI.
//
// Note for application developers: I can give you a piece of advice if
// you are writing a shared library. You probably should export only
// functions from your library. You shouldn't export variables.
//
// As an example what can happen when you export variables without knowing
// the semantics of copy relocations, assume that you have an exported
// variable of type T. It is an ABI-breaking change to add new members at
// end of T even though doing that doesn't change the layout of the
// existing members. That's because the space for the new members are not
// reserved in .bss unless you recompile the main program. That means they
// are likely to overlap with other data that happens to be laid out next
// to the variable in .bss. This kind of issue is sometimes very hard to
// debug. What's a solution? Instead of exporting a variable V from a DSO,
// define an accessor getV().
template <class ELFT> static void addCopyRelSymbol(Ctx &ctx, SharedSymbol &ss) {
  // Copy relocation against zero-sized symbol doesn't make sense.
  uint64_t symSize = ss.getSize();
  if (symSize == 0 || ss.alignment == 0)
    Err(ctx) << "cannot create a copy relocation for symbol " << &ss;

  // See if this symbol is in a read-only segment. If so, preserve the symbol's
  // memory protection by reserving space in the .bss.rel.ro section.
  bool isRO = isReadOnly<ELFT>(ss);
  BssSection *sec = make<BssSection>(ctx, isRO ? ".bss.rel.ro" : ".bss",
                                     symSize, ss.alignment);
  OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent();

  // At this point, sectionBases has been migrated to sections. Append sec to
  // sections.
  if (osec->commands.empty() ||
      !isa<InputSectionDescription>(osec->commands.back()))
    osec->commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(osec->commands.back());
  isd->sections.push_back(sec);
  osec->commitSection(sec);

  // Look through the DSO's dynamic symbol table for aliases and create a
  // dynamic symbol for each one. This causes the copy relocation to correctly
  // interpose any aliases.
  for (SharedSymbol *sym : getSymbolsAt<ELFT>(ctx, ss))
    replaceWithDefined(ctx, *sym, *sec, 0, sym->size);

  ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->copyRel, *sec, 0, ss);
}

// .eh_frame sections are mergeable input sections, so their input
// offsets are not linearly mapped to output section. For each input
// offset, we need to find a section piece containing the offset and
// add the piece's base address to the input offset to compute the
// output offset. That isn't cheap.
//
// This class is to speed up the offset computation. When we process
// relocations, we access offsets in the monotonically increasing
// order. So we can optimize for that access pattern.
//
// For sections other than .eh_frame, this class doesn't do anything.
namespace {
class OffsetGetter {
public:
  OffsetGetter() = default;
  explicit OffsetGetter(InputSectionBase &sec) {
    if (auto *eh = dyn_cast<EhInputSection>(&sec)) {
      cies = eh->cies;
      fdes = eh->fdes;
      i = cies.begin();
      j = fdes.begin();
    }
  }

  // Translates offsets in input sections to offsets in output sections.
  // Given offset must increase monotonically. We assume that Piece is
  // sorted by inputOff.
  uint64_t get(Ctx &ctx, uint64_t off) {
    // Empty cies means this is not an .eh_frame section; offsets map 1:1.
    if (cies.empty())
      return off;

    // Advance the FDE cursor past all pieces starting at or before off, then
    // check whether off lands inside the previous FDE; otherwise try CIEs.
    while (j != fdes.end() && j->inputOff <= off)
      ++j;
    auto it = j;
    if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) {
      while (i != cies.end() && i->inputOff <= off)
        ++i;
      if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off) {
        Err(ctx) << ".eh_frame: relocation is not in any piece";
        return 0;
      }
      it = i;
    }

    // Offset -1 means that the piece is dead (i.e. garbage collected).
    if (it[-1].outputOff == -1)
      return -1;
    return it[-1].outputOff + (off - it[-1].inputOff);
  }

private:
  ArrayRef<EhSectionPiece> cies, fdes;
  ArrayRef<EhSectionPiece>::iterator i, j;
};

// This class encapsulates states needed to scan relocations for one
// InputSectionBase.
class RelocationScanner {
public:
  RelocationScanner(Ctx &ctx) : ctx(ctx) {}
  template <class ELFT>
  void scanSection(InputSectionBase &s, bool isEH = false);

private:
  Ctx &ctx;
  InputSectionBase *sec;
  OffsetGetter getter;

  // End of relocations, used by Mips/PPC64.
  const void *end = nullptr;

  template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const;
  template <class ELFT, class RelTy>
  int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const;
  bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym,
                                uint64_t relOff) const;
  void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym,
                  int64_t addend) const;
  unsigned handleTlsRelocation(RelExpr expr, RelType type, uint64_t offset,
                               Symbol &sym, int64_t addend);

  template <class ELFT, class RelTy>
  void scanOne(typename Relocs<RelTy>::const_iterator &i);
  template <class ELFT, class RelTy> void scan(Relocs<RelTy> rels);
};
} // namespace

// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is of R_MIPS_HI16, there must be a
// R_MIPS_LO16 relocation after that, and an addend is calculated using
// the two relocations.
template <class ELFT, class RelTy>
int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr,
                                             bool isLocal) const {
  if (expr == RE_MIPS_GOTREL && isLocal)
    return sec->getFile<ELFT>()->mipsGp0;

  // The ABI says that the paired relocation is used only for REL.
  // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  // This generalises to relocation types with implicit addends.
  if (RelTy::HasAddend)
    return 0;

  RelType type = rel.getType(ctx.arg.isMips64EL);
  RelType pairTy = getMipsPairType(type, isLocal);
  if (pairTy == R_MIPS_NONE)
    return 0;

  const uint8_t *buf = sec->content().data();
  uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL);

  // To make things worse, paired relocations might not be contiguous in
  // the relocation table, so we need to do linear search. *sigh*
  for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri)
    if (ri->getType(ctx.arg.isMips64EL) == pairTy &&
        ri->getSymbol(ctx.arg.isMips64EL) == symIndex)
      return ctx.target->getImplicitAddend(buf + ri->r_offset, pairTy);

  Warn(ctx) << "can't find matching " << pairTy << " relocation for " << type;
  return 0;
}

// Custom error message if Sym is defined in a discarded section.
template <class ELFT>
static void maybeReportDiscarded(Ctx &ctx, ELFSyncStream &msg, Undefined &sym) {
  auto *file = dyn_cast<ObjFile<ELFT>>(sym.file);
  if (!file || !sym.discardedSecIdx)
    return;
  ArrayRef<typename ELFT::Shdr> objSections =
      file->template getELFShdrs<ELFT>();

  if (sym.type == ELF::STT_SECTION) {
    msg << "relocation refers to a discarded section: ";
    msg << CHECK2(
        file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file);
  } else {
    msg << "relocation refers to a symbol in a discarded section: " << &sym;
  }
  msg << "\n>>> defined in " << file;

  // The section preceding the discarded one holds the SHT_GROUP header if the
  // discarded section was part of a COMDAT group.
  Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1];
  if (elfSec.sh_type != SHT_GROUP)
    return;

  // If the discarded section is a COMDAT.
  StringRef signature = file->getShtGroupSignature(objSections, elfSec);
  if (const InputFile *prevailing =
          ctx.symtab->comdatGroups.lookup(CachedHashStringRef(signature))) {
    msg << "\n>>> section group signature: " << signature
        << "\n>>> prevailing definition is in " << prevailing;
    if (sym.nonPrevailing) {
      msg << "\n>>> or the symbol in the prevailing group had STB_WEAK "
             "binding and the symbol in a non-prevailing group had STB_GLOBAL "
             "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding "
             "signature is not supported";
    }
  }
}

// Check whether the definition name def is a mangled function name that matches
// the reference name ref.
static bool canSuggestExternCForCXX(StringRef ref, StringRef def) {
  llvm::ItaniumPartialDemangler d;
  std::string name = def.str();
  if (d.partialDemangle(name.c_str()))
    return false;
  // getFunctionName returns a malloc'd buffer; free it before returning.
  char *buf = d.getFunctionName(nullptr, nullptr);
  if (!buf)
    return false;
  bool ret = ref == buf;
  free(buf);
  return ret;
}

// Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns
Returns 569 // the suggested symbol, which is either in the symbol table, or in the same 570 // file of sym. 571 static const Symbol *getAlternativeSpelling(Ctx &ctx, const Undefined &sym, 572 std::string &pre_hint, 573 std::string &post_hint) { 574 DenseMap<StringRef, const Symbol *> map; 575 if (sym.file->kind() == InputFile::ObjKind) { 576 auto *file = cast<ELFFileBase>(sym.file); 577 // If sym is a symbol defined in a discarded section, maybeReportDiscarded() 578 // will give an error. Don't suggest an alternative spelling. 579 if (sym.discardedSecIdx != 0 && 580 file->getSections()[sym.discardedSecIdx] == &InputSection::discarded) 581 return nullptr; 582 583 // Build a map of local defined symbols. 584 for (const Symbol *s : sym.file->getSymbols()) 585 if (s->isLocal() && s->isDefined() && !s->getName().empty()) 586 map.try_emplace(s->getName(), s); 587 } 588 589 auto suggest = [&](StringRef newName) -> const Symbol * { 590 // If defined locally. 591 if (const Symbol *s = map.lookup(newName)) 592 return s; 593 594 // If in the symbol table and not undefined. 595 if (const Symbol *s = ctx.symtab->find(newName)) 596 if (!s->isUndefined()) 597 return s; 598 599 return nullptr; 600 }; 601 602 // This loop enumerates all strings of Levenshtein distance 1 as typo 603 // correction candidates and suggests the one that exists as a non-undefined 604 // symbol. 605 StringRef name = sym.getName(); 606 for (size_t i = 0, e = name.size(); i != e + 1; ++i) { 607 // Insert a character before name[i]. 608 std::string newName = (name.substr(0, i) + "0" + name.substr(i)).str(); 609 for (char c = '0'; c <= 'z'; ++c) { 610 newName[i] = c; 611 if (const Symbol *s = suggest(newName)) 612 return s; 613 } 614 if (i == e) 615 break; 616 617 // Substitute name[i]. 618 newName = std::string(name); 619 for (char c = '0'; c <= 'z'; ++c) { 620 newName[i] = c; 621 if (const Symbol *s = suggest(newName)) 622 return s; 623 } 624 625 // Transpose name[i] and name[i+1]. 
This is of edit distance 2 but it is 626 // common. 627 if (i + 1 < e) { 628 newName[i] = name[i + 1]; 629 newName[i + 1] = name[i]; 630 if (const Symbol *s = suggest(newName)) 631 return s; 632 } 633 634 // Delete name[i]. 635 newName = (name.substr(0, i) + name.substr(i + 1)).str(); 636 if (const Symbol *s = suggest(newName)) 637 return s; 638 } 639 640 // Case mismatch, e.g. Foo vs FOO. 641 for (auto &it : map) 642 if (name.equals_insensitive(it.first)) 643 return it.second; 644 for (Symbol *sym : ctx.symtab->getSymbols()) 645 if (!sym->isUndefined() && name.equals_insensitive(sym->getName())) 646 return sym; 647 648 // The reference may be a mangled name while the definition is not. Suggest a 649 // missing extern "C". 650 if (name.starts_with("_Z")) { 651 std::string buf = name.str(); 652 llvm::ItaniumPartialDemangler d; 653 if (!d.partialDemangle(buf.c_str())) 654 if (char *buf = d.getFunctionName(nullptr, nullptr)) { 655 const Symbol *s = suggest(buf); 656 free(buf); 657 if (s) { 658 pre_hint = ": extern \"C\" "; 659 return s; 660 } 661 } 662 } else { 663 const Symbol *s = nullptr; 664 for (auto &it : map) 665 if (canSuggestExternCForCXX(name, it.first)) { 666 s = it.second; 667 break; 668 } 669 if (!s) 670 for (Symbol *sym : ctx.symtab->getSymbols()) 671 if (canSuggestExternCForCXX(name, sym->getName())) { 672 s = sym; 673 break; 674 } 675 if (s) { 676 pre_hint = " to declare "; 677 post_hint = " as extern \"C\"?"; 678 return s; 679 } 680 } 681 682 return nullptr; 683 } 684 685 static void reportUndefinedSymbol(Ctx &ctx, const UndefinedDiag &undef, 686 bool correctSpelling) { 687 Undefined &sym = *undef.sym; 688 ELFSyncStream msg(ctx, DiagLevel::None); 689 690 auto visibility = [&]() { 691 switch (sym.visibility()) { 692 case STV_INTERNAL: 693 return "internal "; 694 case STV_HIDDEN: 695 return "hidden "; 696 case STV_PROTECTED: 697 return "protected "; 698 default: 699 return ""; 700 } 701 }; 702 703 switch (ctx.arg.ekind) { 704 case ELF32LEKind: 705 
maybeReportDiscarded<ELF32LE>(ctx, msg, sym); 706 break; 707 case ELF32BEKind: 708 maybeReportDiscarded<ELF32BE>(ctx, msg, sym); 709 break; 710 case ELF64LEKind: 711 maybeReportDiscarded<ELF64LE>(ctx, msg, sym); 712 break; 713 case ELF64BEKind: 714 maybeReportDiscarded<ELF64BE>(ctx, msg, sym); 715 break; 716 default: 717 llvm_unreachable(""); 718 } 719 if (msg.str().empty()) 720 msg << "undefined " << visibility() << "symbol: " << &sym; 721 722 const size_t maxUndefReferences = 3; 723 for (UndefinedDiag::Loc l : 724 ArrayRef(undef.locs).take_front(maxUndefReferences)) { 725 InputSectionBase &sec = *l.sec; 726 uint64_t offset = l.offset; 727 728 msg << "\n>>> referenced by "; 729 // In the absence of line number information, utilize DW_TAG_variable (if 730 // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`). 731 Symbol *enclosing = sec.getEnclosingSymbol(offset); 732 733 ELFSyncStream msg1(ctx, DiagLevel::None); 734 auto tell = msg.tell(); 735 msg << sec.getSrcMsg(enclosing ? 
*enclosing : sym, offset); 736 if (tell != msg.tell()) 737 msg << "\n>>> "; 738 msg << sec.getObjMsg(offset); 739 } 740 741 if (maxUndefReferences < undef.locs.size()) 742 msg << "\n>>> referenced " << (undef.locs.size() - maxUndefReferences) 743 << " more times"; 744 745 if (correctSpelling) { 746 std::string pre_hint = ": ", post_hint; 747 if (const Symbol *corrected = 748 getAlternativeSpelling(ctx, sym, pre_hint, post_hint)) { 749 msg << "\n>>> did you mean" << pre_hint << corrected << post_hint 750 << "\n>>> defined in: " << corrected->file; 751 } 752 } 753 754 if (sym.getName().starts_with("_ZTV")) 755 msg << "\n>>> the vtable symbol may be undefined because the class is " 756 "missing its key function " 757 "(see https://lld.llvm.org/missingkeyfunction)"; 758 if (ctx.arg.gcSections && ctx.arg.zStartStopGC && 759 sym.getName().starts_with("__start_")) { 760 msg << "\n>>> the encapsulation symbol needs to be retained under " 761 "--gc-sections properly; consider -z nostart-stop-gc " 762 "(see https://lld.llvm.org/ELF/start-stop-gc)"; 763 } 764 765 if (undef.isWarning) 766 Warn(ctx) << msg.str(); 767 else 768 ctx.e.error(msg.str(), ErrorTag::SymbolNotFound, {sym.getName()}); 769 } 770 771 void elf::reportUndefinedSymbols(Ctx &ctx) { 772 // Find the first "undefined symbol" diagnostic for each diagnostic, and 773 // collect all "referenced from" lines at the first diagnostic. 774 DenseMap<Symbol *, UndefinedDiag *> firstRef; 775 for (UndefinedDiag &undef : ctx.undefErrs) { 776 assert(undef.locs.size() == 1); 777 if (UndefinedDiag *canon = firstRef.lookup(undef.sym)) { 778 canon->locs.push_back(undef.locs[0]); 779 undef.locs.clear(); 780 } else 781 firstRef[undef.sym] = &undef; 782 } 783 784 // Enable spell corrector for the first 2 diagnostics. 785 for (auto [i, undef] : llvm::enumerate(ctx.undefErrs)) 786 if (!undef.locs.empty()) 787 reportUndefinedSymbol(ctx, undef, i < 2); 788 } 789 790 // Report an undefined symbol if necessary. 
// Returns true if the undefined symbol will produce an error message.
static bool maybeReportUndefined(Ctx &ctx, Undefined &sym,
                                 InputSectionBase &sec, uint64_t offset) {
  // Relocation scanning may run in parallel; serialize access to undefErrs.
  std::lock_guard<std::mutex> lock(ctx.relocMutex);
  // If versioned, issue an error (even if the symbol is weak) because we don't
  // know the defining filename which is required to construct a Verneed entry.
  if (sym.hasVersionSuffix) {
    ctx.undefErrs.push_back({&sym, {{&sec, offset}}, false});
    return true;
  }
  if (sym.isWeak())
    return false;

  bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT;
  if (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal)
    return false;

  // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc
  // which references a switch table in a discarded .rodata/.text section. The
  // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF
  // spec says references from outside the group to a STB_LOCAL symbol are not
  // allowed. Work around the bug.
  //
  // PPC32 .got2 is similar but cannot be fixed. Multiple .got2 is infeasible
  // because .LC0-.LTOC is not representable if the two labels are in different
  // .got2
  if (sym.discardedSecIdx != 0 && (sec.name == ".got2" || sec.name == ".toc"))
    return false;

  bool isWarning =
      (ctx.arg.unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) ||
      ctx.arg.noinhibitExec;
  ctx.undefErrs.push_back({&sym, {{&sec, offset}}, isWarning});
  return !isWarning;
}

// MIPS N32 ABI treats series of successive relocations with the same offset
// as a single relocation. The similar approach used by N64 ABI, but this ABI
// packs all relocations into the single relocation record. Here we emulate
// this for the N32 ABI. Iterate over relocations with the same offset and put
// their types into the single bit-set.
template <class RelTy>
RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const {
  uint32_t type = 0;
  uint64_t offset = rel->r_offset;

  // Fold each successive relocation type at this offset into one byte of the
  // composite type value, advancing the caller's iterator past them.
  int n = 0;
  while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset)
    type |= (rel++)->getType(ctx.arg.isMips64EL) << (8 * n++);
  return type;
}

// Add a relative dynamic relocation for `sym` at isec+offsetInSec, choosing
// among RELR (compact, no addend), RELA, and the MTE-tagged-globals path.
// `shard` selects the per-thread (parallel scan) containers.
template <bool shard = false>
static void addRelativeReloc(Ctx &ctx, InputSectionBase &isec,
                             uint64_t offsetInSec, Symbol &sym, int64_t addend,
                             RelExpr expr, RelType type) {
  Partition &part = isec.getPartition(ctx);

  if (sym.isTagged()) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    part.relaDyn->addRelativeReloc(ctx.target->relativeRel, isec, offsetInSec,
                                   sym, addend, type, expr);
    // With MTE globals, we always want to derive the address tag by `ldg`-ing
    // the symbol. When we have a RELATIVE relocation though, we no longer have
    // a reference to the symbol. Because of this, when we have an addend that
    // puts the result of the RELATIVE relocation out-of-bounds of the symbol
    // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI
    // says we should store the offset to the start of the symbol in the target
    // field. This is described in further detail in:
    // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative
    if (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize())
      isec.relocations.push_back({expr, type, offsetInSec, addend, &sym});
    return;
  }

  // Add a relative relocation. If relrDyn section is enabled, and the
  // relocation offset is guaranteed to be even, add the relocation to
  // the relrDyn section, otherwise add it to the relaDyn section. relrDyn
  // sections don't support odd offsets. Also, relrDyn sections don't store
  // the addend values, so we must write it to the relocated address.
  if (part.relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) {
    isec.addReloc({expr, type, offsetInSec, addend, &sym});
    if (shard)
      part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back(
          {&isec, isec.relocs().size() - 1});
    else
      part.relrDyn->relocs.push_back({&isec, isec.relocs().size() - 1});
    return;
  }
  part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec,
                                        offsetInSec, sym, addend, type, expr);
}

// Reserve a PLT and a .got.plt slot for `sym` and emit the dynamic relocation
// (e.g. JUMP_SLOT) that makes the slot point at the resolved function.
template <class PltSection, class GotPltSection>
static void addPltEntry(Ctx &ctx, PltSection &plt, GotPltSection &gotPlt,
                        RelocationBaseSection &rel, RelType type, Symbol &sym) {
  plt.addEntry(sym);
  gotPlt.addEntry(sym);
  rel.addReloc({type, &gotPlt, sym.getGotPltOffset(ctx),
                sym.isPreemptible ? DynamicReloc::AgainstSymbol
                                  : DynamicReloc::AddendOnlyWithTargetVA,
                sym, 0, R_ABS});
}

// Reserve a GOT slot for `sym` and fill it either statically or with a
// dynamic relocation, depending on preemptibility and PIC-ness.
void elf::addGotEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);

  // If preemptible, emit a GLOB_DAT relocation.
  if (sym.isPreemptible) {
    ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, ctx.in.got.get(), off,
                                     DynamicReloc::AgainstSymbol, sym, 0,
                                     R_ABS});
    return;
  }

  // Otherwise, the value is either a link-time constant or the load base
  // plus a constant.
  if (!ctx.arg.isPic || isAbsolute(sym))
    ctx.in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym});
  else
    addRelativeReloc(ctx, *ctx.in.got, off, sym, 0, R_ABS,
                     ctx.target->symbolicRel);
}

// Like addGotEntry, but for an AArch64 PAuth (signed) GOT slot. A signed GOT
// entry always needs a dynamic relocation so the loader can sign the value.
static void addGotAuthEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  ctx.in.got->addAuthEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);

  // If preemptible, emit a GLOB_DAT relocation.
  if (sym.isPreemptible) {
    ctx.mainPart->relaDyn->addReloc({R_AARCH64_AUTH_GLOB_DAT, ctx.in.got.get(),
                                     off, DynamicReloc::AgainstSymbol, sym, 0,
                                     R_ABS});
    return;
  }

  // Signed GOT requires dynamic relocation.
  ctx.in.got->getPartition(ctx).relaDyn->addReloc(
      {R_AARCH64_AUTH_RELATIVE, ctx.in.got.get(), off,
       DynamicReloc::AddendOnlyWithTargetVA, sym, 0, R_ABS});
}

// Reserve a GOT slot holding the TP-relative offset of `sym` (Initial-Exec
// TLS). Resolved statically when the offset is a link-time constant.
static void addTpOffsetGotEntry(Ctx &ctx, Symbol &sym) {
  ctx.in.got->addEntry(sym);
  uint64_t off = sym.getGotOffset(ctx);
  if (!sym.isPreemptible && !ctx.arg.shared) {
    ctx.in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym});
    return;
  }
  ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
      ctx.target->tlsGotRel, *ctx.in.got, off, sym, ctx.target->symbolicRel);
}

// Return true if we can define a symbol in the executable that
// contains the value/function of a symbol defined in a shared
// library.
static bool canDefineSymbolInExecutable(Ctx &ctx, Symbol &sym) {
  // If the symbol has default visibility the symbol defined in the
  // executable will preempt it.
  // Note that we want the visibility of the shared symbol itself, not
  // the visibility of the symbol in the output file we are producing.
  if (!sym.dsoProtected)
    return true;

  // If we are allowed to break address equality of functions, defining
  // a plt entry will allow the program to call the function in the
  // .so, but the .so and the executable will not agree on the address
  // of the function. Similar logic for objects.
  return ((sym.isFunc() && ctx.arg.ignoreFunctionAddressEquality) ||
          (sym.isObject() && ctx.arg.ignoreDataAddressEquality));
}

// Returns true if a given relocation can be computed at link-time.
// This only handles relocation types expected in processAux.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers to a
// non-interposable function in the same executable. This function
// will return true for such relocation.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
                                                 const Symbol &sym,
                                                 uint64_t relOff) const {
  // These expressions always compute a constant
  if (oneof<
          R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, RE_MIPS_GOT_LOCAL_PAGE,
          RE_MIPS_GOTREL, RE_MIPS_GOT_OFF, RE_MIPS_GOT_OFF32, RE_MIPS_GOT_GP_PC,
          RE_AARCH64_GOT_PAGE_PC, RE_AARCH64_AUTH_GOT_PAGE_PC, R_GOT_PC,
          R_GOTONLY_PC, R_GOTPLTONLY_PC, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT,
          R_GOTPLT_GOTREL, R_GOTPLT_PC, RE_PPC32_PLTREL, RE_PPC64_CALL_PLT,
          RE_PPC64_RELAX_TOC, RE_RISCV_ADD, RE_AARCH64_GOT_PAGE,
          RE_AARCH64_AUTH_GOT, RE_AARCH64_AUTH_GOT_PC, RE_LOONGARCH_PLT_PAGE_PC,
          RE_LOONGARCH_GOT, RE_LOONGARCH_GOT_PAGE_PC>(e))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (e == R_GOT || e == R_PLT)
    return ctx.target->usesOnlyLowPageBits(type) || !ctx.arg.isPic;

  // R_AARCH64_AUTH_ABS64 requires a dynamic relocation.
  if (sym.isPreemptible || e == RE_AARCH64_AUTH)
    return false;
  if (!ctx.arg.isPic)
    return true;

  // Constant when referencing a non-preemptible symbol.
  if (e == R_SIZE || e == RE_RISCV_LEB128)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative.
  bool absVal = isAbsoluteValue(sym);
  bool relE = isRelExpr(e);
  if (absVal && !relE)
    return true;
  if (!absVal && relE)
    return true;
  if (!absVal && !relE)
    return ctx.target->usesOnlyLowPageBits(type);

  // The remaining case: a relative expression against an absolute symbol.
  assert(absVal && relE);

  // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol
  // in PIC mode. This is a little strange, but it allows us to link function
  // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers).
  // Normally such a call will be guarded with a comparison, which will load a
  // zero from the GOT.
  if (sym.isUndefWeak())
    return true;

  // We set the final symbols values for linker script defined symbols later.
  // They always can be computed as a link time constant.
  if (sym.scriptDefined)
    return true;

  // Otherwise this combination is infeasible; report it, then claim the value
  // is constant so no (equally bogus) dynamic relocation is emitted on top of
  // the error.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot refer to absolute symbol: " << &sym;
  printLocation(diag, *sec, sym, relOff);
  return true;
}

// The reason we have to do this early scan is as follows
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
                                   Symbol &sym, int64_t addend) const {
  // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT
  // indirection.
  const bool isIfunc = sym.isGnuIFunc();
  if (!sym.isPreemptible && (!isIfunc || ctx.arg.zIfuncNoplt)) {
    if (expr != R_GOT_PC) {
      // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call
      // stub type. It should be ignored if optimized to R_PC.
      if (ctx.arg.emachine == EM_PPC && expr == RE_PPC32_PLTREL)
        addend &= ~0x8000;
      // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into
      // call __tls_get_addr even if the symbol is non-preemptible.
      if (!(ctx.arg.emachine == EM_HEXAGON &&
            (type == R_HEX_GD_PLT_B22_PCREL ||
             type == R_HEX_GD_PLT_B22_PCREL_X ||
             type == R_HEX_GD_PLT_B32_PCREL_X)))
        expr = fromPlt(expr);
    } else if (!isAbsoluteValue(sym)) {
      expr = ctx.target->adjustGotPcExpr(type, addend,
                                         sec->content().data() + offset);
      // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
      // needing the GOT if we can't relax everything.
      if (expr == R_RELAX_GOT_PC)
        ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
    }
  }

  // We were asked not to generate PLT entries for ifuncs. Instead, pass the
  // direct relocation on through.
  if (LLVM_UNLIKELY(isIfunc) && ctx.arg.zIfuncNoplt) {
    std::lock_guard<std::mutex> lock(ctx.relocMutex);
    sym.isExported = true;
    ctx.mainPart->relaDyn->addSymbolReloc(type, *sec, offset, sym, addend,
                                          type);
    return;
  }

  if (needsGot(expr)) {
    if (ctx.arg.emachine == EM_MIPS) {
      // MIPS ABI has special rules to process GOT entries and doesn't
      // require relocation entries for them. A special case is TLS
      // relocations. In that case dynamic loader applies dynamic
      // relocations to initialize TLS GOT entries.
      // See "Global Offset Table" in Chapter 5 in the following document
      // for detailed description:
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
      ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
    } else if (!sym.isTls() || ctx.arg.emachine != EM_LOONGARCH) {
      // Many LoongArch TLS relocs reuse the RE_LOONGARCH_GOT type, in which
      // case the NEEDS_GOT flag shouldn't get set.
      if (expr == RE_AARCH64_AUTH_GOT || expr == RE_AARCH64_AUTH_GOT_PAGE_PC ||
          expr == RE_AARCH64_AUTH_GOT_PC)
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_AUTH);
      else
        sym.setFlags(NEEDS_GOT | NEEDS_GOT_NONAUTH);
    }
  } else if (needsPlt(expr)) {
    sym.setFlags(NEEDS_PLT);
  } else if (LLVM_UNLIKELY(isIfunc)) {
    sym.setFlags(HAS_DIRECT_RELOC);
  }

  // If the relocation is known to be a link-time constant, we know no dynamic
  // relocation will be created, pass the control to relocateAlloc() or
  // relocateNonAlloc() to resolve it.
  //
  // The behavior of an undefined weak reference is implementation defined. For
  // non-link-time constants, we resolve relocations statically (let
  // relocate{,Non}Alloc() resolve them) for -no-pie and try producing dynamic
  // relocations for -pie and -shared.
  //
  // The general expectation of -no-pie static linking is that there is no
  // dynamic relocation (except IRELATIVE). Emitting dynamic relocations for
  // -shared matches the spirit of its -z undefs default. -pie has freedom on
  // choices, and we choose dynamic relocations to be consistent with the
  // handling of GOT-generating relocations.
  if (isStaticLinkTimeConstant(expr, type, sym, offset) ||
      (!ctx.arg.isPic && sym.isUndefWeak())) {
    sec->addReloc({expr, type, offset, addend, &sym});
    return;
  }

  // Use a simple -z notext rule that treats all sections except .eh_frame as
  // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
  // SectionBase::getOffset would incorrectly adjust the offset).
  //
  // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
  // conversion. We still emit a dynamic relocation.
  bool canWrite = (sec->flags & SHF_WRITE) ||
                  !(ctx.arg.zText ||
                    (isa<EhInputSection>(sec) && ctx.arg.emachine != EM_MIPS));
  if (canWrite) {
    RelType rel = ctx.target->getDynRel(type);
    if (oneof<R_GOT, RE_LOONGARCH_GOT>(expr) ||
        (rel == ctx.target->symbolicRel && !sym.isPreemptible)) {
      addRelativeReloc<true>(ctx, *sec, offset, sym, addend, expr, type);
      return;
    }
    if (rel != 0) {
      if (ctx.arg.emachine == EM_MIPS && rel == ctx.target->symbolicRel)
        rel = ctx.target->relativeRel;
      std::lock_guard<std::mutex> lock(ctx.relocMutex);
      Partition &part = sec->getPartition(ctx);
      if (ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64) {
        // For a preemptible symbol, we can't use a relative relocation. For an
        // undefined symbol, we can't compute offset at link-time and use a
        // relative relocation. Use a symbolic relocation instead.
        if (sym.isPreemptible) {
          part.relaDyn->addSymbolReloc(type, *sec, offset, sym, addend, type);
        } else if (part.relrAuthDyn && sec->addralign >= 2 && offset % 2 == 0) {
          // When symbol values are determined in
          // finalizeAddressDependentContent, some .relr.auth.dyn relocations
          // may be moved to .rela.dyn.
          sec->addReloc({expr, type, offset, addend, &sym});
          part.relrAuthDyn->relocs.push_back({sec, sec->relocs().size() - 1});
        } else {
          part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, sec, offset,
                                  DynamicReloc::AddendOnlyWithTargetVA, sym,
                                  addend, R_ABS});
        }
        return;
      }
      part.relaDyn->addSymbolReloc(rel, *sec, offset, sym, addend, type);

      // MIPS ABI turns using of GOT and dynamic relocations inside out.
      // While regular ABI uses dynamic relocations to fill up GOT entries
      // MIPS ABI requires dynamic linker to fill up GOT entries using
      // specially sorted dynamic symbol table. This affects even dynamic
      // relocations against symbols which do not require GOT entries
      // creation explicitly, i.e. do not have any GOT-relocations. So if
      // a preemptible symbol has a dynamic relocation we anyway have
      // to create a GOT entry for it.
      // If a non-preemptible symbol has a dynamic relocation against it,
      // dynamic linker takes its st_value, adds offset and writes down
      // result of the dynamic relocation. In case of preemptible symbol
      // dynamic linker performs symbol resolution, writes the symbol value
      // to the GOT entry and reads the GOT entry when it needs to perform
      // a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
      if (ctx.arg.emachine == EM_MIPS)
        ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
      return;
    }
  }

  // When producing an executable, we can perform copy relocations (for
  // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO.
  // Copy relocations/canonical PLT entries are unsupported for
  // R_AARCH64_AUTH_ABS64.
  if (!ctx.arg.shared && sym.isShared() &&
      !(ctx.arg.emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64)) {
    if (!canDefineSymbolInExecutable(ctx, sym)) {
      auto diag = Err(ctx);
      diag << "cannot preempt symbol: " << &sym;
      printLocation(diag, *sec, sym, offset);
      return;
    }

    if (sym.isObject()) {
      // Produce a copy relocation.
      if (auto *ss = dyn_cast<SharedSymbol>(&sym)) {
        if (!ctx.arg.zCopyreloc) {
          auto diag = Err(ctx);
          diag << "unresolvable relocation " << type << " against symbol '"
               << ss << "'; recompile with -fPIC or remove '-z nocopyreloc'";
          printLocation(diag, *sec, sym, offset);
        }
        sym.setFlags(NEEDS_COPY);
      }
      sec->addReloc({expr, type, offset, addend, &sym});
      return;
    }

    // This handles a non PIC program call to function in a shared library. In
    // an ideal world, we could just report an error saying the relocation can
    // overflow at runtime. In the real world with glibc, crt1.o has a
    // R_X86_64_PC32 pointing to libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry and
    // use that as the function value.
    //
    // For the static linking part, we just return a plt expr and everything
    // else will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a so symbol by creating an undefined symbol with a
    // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
    // the value of the symbol we created. This is true even for got entries, so
    // pointer equality is maintained. To avoid an infinite loop, the only entry
    // that points to the real function is a dedicated got entry used by the
    // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).

    // For position independent executable on i386, the plt entry requires ebx
    // to be set. This causes two problems:
    // * If some code has a direct reference to a function, it was probably
    //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
    // * If a library definition gets preempted to the executable, it will have
    //   the wrong ebx value.
    if (sym.isFunc()) {
      if (ctx.arg.pie && ctx.arg.emachine == EM_386) {
        auto diag = Err(ctx);
        diag << "symbol '" << &sym
             << "' cannot be preempted; recompile with -fPIE";
        printLocation(diag, *sec, sym, offset);
      }
      sym.setFlags(NEEDS_COPY | NEEDS_PLT);
      sec->addReloc({expr, type, offset, addend, &sym});
      return;
    }
  }

  // Nothing above applied: the relocation is infeasible in this output mode.
  auto diag = Err(ctx);
  diag << "relocation " << type << " cannot be used against ";
  if (sym.getName().empty())
    diag << "local symbol";
  else
    diag << "symbol '" << &sym << "'";
  diag << "; recompile with -fPIC";
  printLocation(diag, *sec, sym, offset);
}

// This function is similar to the `handleTlsRelocation`. MIPS does not
// support any relaxations for TLS relocations so by factoring out MIPS
// handling in to the separate function we can simplify the code and do not
// pollute other `handleTlsRelocation` by MIPS `ifs` statements.
// Mips has a custom MipsGotSection that handles the writing of GOT entries
// without dynamic relocations.
1281 static unsigned handleMipsTlsRelocation(Ctx &ctx, RelType type, Symbol &sym, 1282 InputSectionBase &c, uint64_t offset, 1283 int64_t addend, RelExpr expr) { 1284 if (expr == RE_MIPS_TLSLD) { 1285 ctx.in.mipsGot->addTlsIndex(*c.file); 1286 c.addReloc({expr, type, offset, addend, &sym}); 1287 return 1; 1288 } 1289 if (expr == RE_MIPS_TLSGD) { 1290 ctx.in.mipsGot->addDynTlsEntry(*c.file, sym); 1291 c.addReloc({expr, type, offset, addend, &sym}); 1292 return 1; 1293 } 1294 return 0; 1295 } 1296 1297 static unsigned handleAArch64PAuthTlsRelocation(InputSectionBase *sec, 1298 RelExpr expr, RelType type, 1299 uint64_t offset, Symbol &sym, 1300 int64_t addend) { 1301 // Do not optimize signed TLSDESC to LE/IE (as described in pauthabielf64). 1302 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#general-restrictions 1303 // > PAUTHELF64 only supports the descriptor based TLS (TLSDESC). 1304 if (oneof<RE_AARCH64_AUTH_TLSDESC_PAGE, RE_AARCH64_AUTH_TLSDESC>(expr)) { 1305 sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_AUTH); 1306 sec->addReloc({expr, type, offset, addend, &sym}); 1307 return 1; 1308 } 1309 1310 // TLSDESC_CALL hint relocation should not be emitted by compiler with signed 1311 // TLSDESC enabled. 1312 if (expr == R_TLSDESC_CALL) 1313 sym.setFlags(NEEDS_TLSDESC_NONAUTH); 1314 1315 return 0; 1316 } 1317 1318 // Notes about General Dynamic and Local Dynamic TLS models below. They may 1319 // require the generation of a pair of GOT entries that have associated dynamic 1320 // relocations. The pair of GOT entries created are of the form GOT[e0] Module 1321 // Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of 1322 // symbol in TLS block. 1323 // 1324 // Returns the number of relocations processed. 
1325 unsigned RelocationScanner::handleTlsRelocation(RelExpr expr, RelType type, 1326 uint64_t offset, Symbol &sym, 1327 int64_t addend) { 1328 bool isAArch64 = ctx.arg.emachine == EM_AARCH64; 1329 1330 if (isAArch64) 1331 if (unsigned processed = handleAArch64PAuthTlsRelocation( 1332 sec, expr, type, offset, sym, addend)) 1333 return processed; 1334 1335 if (expr == R_TPREL || expr == R_TPREL_NEG) { 1336 if (ctx.arg.shared) { 1337 auto diag = Err(ctx); 1338 diag << "relocation " << type << " against " << &sym 1339 << " cannot be used with -shared"; 1340 printLocation(diag, *sec, sym, offset); 1341 return 1; 1342 } 1343 return 0; 1344 } 1345 1346 if (ctx.arg.emachine == EM_MIPS) 1347 return handleMipsTlsRelocation(ctx, type, sym, *sec, offset, addend, expr); 1348 1349 // LoongArch does not yet implement transition from TLSDESC to LE/IE, so 1350 // generate TLSDESC dynamic relocation for the dynamic linker to handle. 1351 if (ctx.arg.emachine == EM_LOONGARCH && 1352 oneof<RE_LOONGARCH_TLSDESC_PAGE_PC, R_TLSDESC, R_TLSDESC_PC, 1353 R_TLSDESC_CALL>(expr)) { 1354 if (expr != R_TLSDESC_CALL) { 1355 sym.setFlags(NEEDS_TLSDESC); 1356 sec->addReloc({expr, type, offset, addend, &sym}); 1357 } 1358 return 1; 1359 } 1360 1361 bool isRISCV = ctx.arg.emachine == EM_RISCV; 1362 1363 if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC, 1364 R_TLSDESC_GOTPLT>(expr) && 1365 ctx.arg.shared) { 1366 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not 1367 // set NEEDS_TLSDESC on the label. 1368 if (expr != R_TLSDESC_CALL) { 1369 if (isAArch64) 1370 sym.setFlags(NEEDS_TLSDESC | NEEDS_TLSDESC_NONAUTH); 1371 else if (!isRISCV || type == R_RISCV_TLSDESC_HI20) 1372 sym.setFlags(NEEDS_TLSDESC); 1373 sec->addReloc({expr, type, offset, addend, &sym}); 1374 } 1375 return 1; 1376 } 1377 1378 // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE 1379 // optimizations. 1380 // RISC-V supports TLSDESC to IE/LE optimizations. 
1381 // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable 1382 // optimization as well. 1383 bool execOptimize = 1384 !ctx.arg.shared && ctx.arg.emachine != EM_ARM && 1385 ctx.arg.emachine != EM_HEXAGON && ctx.arg.emachine != EM_LOONGARCH && 1386 !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) && 1387 !sec->file->ppc64DisableTLSRelax; 1388 1389 // If we are producing an executable and the symbol is non-preemptable, it 1390 // must be defined and the code sequence can be optimized to use 1391 // Local-Exesec-> 1392 // 1393 // ARM and RISC-V do not support any relaxations for TLS relocations, however, 1394 // we can omit the DTPMOD dynamic relocations and resolve them at link time 1395 // because them are always 1. This may be necessary for static linking as 1396 // DTPMOD may not be expected at load time. 1397 bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared; 1398 1399 // Local Dynamic is for access to module local TLS variables, while still 1400 // being suitable for being dynamically loaded via dlopen. GOT[e0] is the 1401 // module index, with a special value of 0 for the current module. GOT[e1] is 1402 // unused. There only needs to be one module index entry. 
1403 if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) { 1404 // Local-Dynamic relocs can be optimized to Local-Exesec-> 1405 if (execOptimize) { 1406 sec->addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), 1407 type, offset, addend, &sym}); 1408 return ctx.target->getTlsGdRelaxSkip(type); 1409 } 1410 if (expr == R_TLSLD_HINT) 1411 return 1; 1412 ctx.needsTlsLd.store(true, std::memory_order_relaxed); 1413 sec->addReloc({expr, type, offset, addend, &sym}); 1414 return 1; 1415 } 1416 1417 // Local-Dynamic relocs can be optimized to Local-Exesec-> 1418 if (expr == R_DTPREL) { 1419 if (execOptimize) 1420 expr = ctx.target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE); 1421 sec->addReloc({expr, type, offset, addend, &sym}); 1422 return 1; 1423 } 1424 1425 // Local-Dynamic sequence where offset of tls variable relative to dynamic 1426 // thread pointer is stored in the got. This cannot be optimized to 1427 // Local-Exesec-> 1428 if (expr == R_TLSLD_GOT_OFF) { 1429 sym.setFlags(NEEDS_GOT_DTPREL); 1430 sec->addReloc({expr, type, offset, addend, &sym}); 1431 return 1; 1432 } 1433 1434 if (oneof<RE_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC, 1435 R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC, 1436 RE_LOONGARCH_TLSGD_PAGE_PC>(expr)) { 1437 if (!execOptimize) { 1438 sym.setFlags(NEEDS_TLSGD); 1439 sec->addReloc({expr, type, offset, addend, &sym}); 1440 return 1; 1441 } 1442 1443 // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec 1444 // depending on the symbol being locally defined or not. 1445 // 1446 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible 1447 // label, so TLSDESC=>IE will be categorized as R_RELAX_TLS_GD_TO_LE. 
We fix 1448 // the categorization in RISCV::relocateAllosec-> 1449 if (sym.isPreemptible) { 1450 sym.setFlags(NEEDS_TLSGD_TO_IE); 1451 sec->addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), 1452 type, offset, addend, &sym}); 1453 } else { 1454 sec->addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), 1455 type, offset, addend, &sym}); 1456 } 1457 return ctx.target->getTlsGdRelaxSkip(type); 1458 } 1459 1460 if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, RE_AARCH64_GOT_PAGE_PC, 1461 RE_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) { 1462 ctx.hasTlsIe.store(true, std::memory_order_relaxed); 1463 // Initial-Exec relocs can be optimized to Local-Exec if the symbol is 1464 // locally defined. This is not supported on SystemZ. 1465 if (execOptimize && isLocalInExecutable && ctx.arg.emachine != EM_S390) { 1466 sec->addReloc({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym}); 1467 } else if (expr != R_TLSIE_HINT) { 1468 sym.setFlags(NEEDS_TLSIE); 1469 // R_GOT needs a relative relocation for PIC on i386 and Hexagon. 1470 if (expr == R_GOT && ctx.arg.isPic && 1471 !ctx.target->usesOnlyLowPageBits(type)) 1472 addRelativeReloc<true>(ctx, *sec, offset, sym, addend, expr, type); 1473 else 1474 sec->addReloc({expr, type, offset, addend, &sym}); 1475 } 1476 return 1; 1477 } 1478 1479 return 0; 1480 } 1481 1482 template <class ELFT, class RelTy> 1483 void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) { 1484 const RelTy &rel = *i; 1485 uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL); 1486 Symbol &sym = sec->getFile<ELFT>()->getSymbol(symIndex); 1487 RelType type; 1488 if constexpr (ELFT::Is64Bits || RelTy::IsCrel) { 1489 type = rel.getType(ctx.arg.isMips64EL); 1490 ++i; 1491 } else { 1492 // CREL is unsupported for MIPS N32. 
1493 if (ctx.arg.mipsN32Abi) { 1494 type = getMipsN32RelType(i); 1495 } else { 1496 type = rel.getType(ctx.arg.isMips64EL); 1497 ++i; 1498 } 1499 } 1500 // Get an offset in an output section this relocation is applied to. 1501 uint64_t offset = getter.get(ctx, rel.r_offset); 1502 if (offset == uint64_t(-1)) 1503 return; 1504 1505 RelExpr expr = 1506 ctx.target->getRelExpr(type, sym, sec->content().data() + offset); 1507 int64_t addend = RelTy::HasAddend 1508 ? getAddend<ELFT>(rel) 1509 : ctx.target->getImplicitAddend( 1510 sec->content().data() + rel.r_offset, type); 1511 if (LLVM_UNLIKELY(ctx.arg.emachine == EM_MIPS)) 1512 addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal()); 1513 else if (ctx.arg.emachine == EM_PPC64 && ctx.arg.isPic && type == R_PPC64_TOC) 1514 addend += getPPC64TocBase(ctx); 1515 1516 // Ignore R_*_NONE and other marker relocations. 1517 if (expr == R_NONE) 1518 return; 1519 1520 // Error if the target symbol is undefined. Symbol index 0 may be used by 1521 // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them. 1522 if (sym.isUndefined() && symIndex != 0 && 1523 maybeReportUndefined(ctx, cast<Undefined>(sym), *sec, offset)) 1524 return; 1525 1526 if (ctx.arg.emachine == EM_PPC64) { 1527 // We can separate the small code model relocations into 2 categories: 1528 // 1) Those that access the compiler generated .toc sections. 1529 // 2) Those that access the linker allocated got entries. 1530 // lld allocates got entries to symbols on demand. Since we don't try to 1531 // sort the got entries in any way, we don't have to track which objects 1532 // have got-based small code model relocs. The .toc sections get placed 1533 // after the end of the linker allocated .got section and we do sort those 1534 // so sections addressed with small code model relocations come first. 
1535 if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS) 1536 sec->file->ppc64SmallCodeModelTocRelocs = true; 1537 1538 // Record the TOC entry (.toc + addend) as not relaxable. See the comment in 1539 // InputSectionBase::relocateAlloc(). 1540 if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(sym) && 1541 cast<Defined>(sym).section->name == ".toc") 1542 ctx.ppc64noTocRelax.insert({&sym, addend}); 1543 1544 if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) || 1545 (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) { 1546 // Skip the error check for CREL, which does not set `end`. 1547 if constexpr (!RelTy::IsCrel) { 1548 if (i == end) { 1549 auto diag = Err(ctx); 1550 diag << "R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last " 1551 "relocation"; 1552 printLocation(diag, *sec, sym, offset); 1553 return; 1554 } 1555 } 1556 1557 // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC 1558 // case, so we can discern it later from the toc-case. 1559 if (i->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC) 1560 ++offset; 1561 } 1562 } 1563 1564 // If the relocation does not emit a GOT or GOTPLT entry but its computation 1565 // uses their addresses, we need GOT or GOTPLT to be created. 1566 // 1567 // The 5 types that relative GOTPLT are all x86 and x86-64 specific. 1568 if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT, 1569 R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) { 1570 ctx.in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed); 1571 } else if (oneof<R_GOTONLY_PC, R_GOTREL, RE_PPC32_PLTREL, RE_PPC64_TOCBASE, 1572 RE_PPC64_RELAX_TOC>(expr)) { 1573 ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed); 1574 } 1575 1576 // Process TLS relocations, including TLS optimizations. Note that 1577 // R_TPREL and R_TPREL_NEG relocations are resolved in processAux. 1578 // 1579 // Some RISCV TLSDESC relocations reference a local NOTYPE symbol, 1580 // but we need to process them in handleTlsRelocation. 
  if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) {
    if (unsigned processed =
            handleTlsRelocation(expr, type, offset, sym, addend)) {
      // handleTlsRelocation reports how many relocation records it consumed;
      // advance past the extra ones.
      i += processed - 1;
      return;
    }
  }

  processAux(expr, type, offset, sym, addend);
}

// R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for
// General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is
// found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the
// instructions are generated by very old IBM XL compilers. Work around the
// issue by disabling GD/LD to IE/LE relaxation.
template <class RelTy>
static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs<RelTy> rels) {
  // Skip if sec is synthetic (sec.file is null) or if sec has been marked.
  if (!sec.file || sec.file->ppc64DisableTLSRelax)
    return;
  bool hasGDLD = false;
  for (const RelTy &rel : rels) {
    RelType type = rel.getType(false);
    switch (type) {
    case R_PPC64_TLSGD:
    case R_PPC64_TLSLD:
      return; // Found a marker
    case R_PPC64_GOT_TLSGD16:
    case R_PPC64_GOT_TLSGD16_HA:
    case R_PPC64_GOT_TLSGD16_HI:
    case R_PPC64_GOT_TLSGD16_LO:
    case R_PPC64_GOT_TLSLD16:
    case R_PPC64_GOT_TLSLD16_HA:
    case R_PPC64_GOT_TLSLD16_HI:
    case R_PPC64_GOT_TLSLD16_LO:
      hasGDLD = true;
      break;
    }
  }
  if (hasGDLD) {
    sec.file->ppc64DisableTLSRelax = true;
    Warn(sec.file->ctx)
        << sec.file
        << ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations "
           "without "
           "R_PPC64_TLSGD/R_PPC64_TLSLD relocations";
  }
}

// Scan all relocation records of the current section (`sec`), dispatching each
// to scanOne. RelTy selects the record encoding (REL/RELA/CREL).
template <class ELFT, class RelTy>
void RelocationScanner::scan(Relocs<RelTy> rels) {
  // Not all relocations end up in Sec->Relocations, but a lot do.
  sec->relocations.reserve(rels.size());

  if (ctx.arg.emachine == EM_PPC64)
    checkPPC64TLSRelax<RelTy>(*sec, rels);

  // For EhInputSection, OffsetGetter expects the relocations to be sorted by
  // r_offset. In rare cases (.eh_frame pieces are reordered by a linker
  // script), the relocations may be unordered.
  // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
  // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
  SmallVector<RelTy, 0> storage;
  if (isa<EhInputSection>(sec) || ctx.arg.emachine == EM_S390)
    rels = sortRels(rels, storage);

  if constexpr (RelTy::IsCrel) {
    for (auto i = rels.begin(); i != rels.end();)
      scanOne<ELFT, RelTy>(i);
  } else {
    // The non-CREL code path has additional check for PPC64 TLS.
    end = static_cast<const void *>(rels.end());
    for (auto i = rels.begin(); i != end;)
      scanOne<ELFT, RelTy>(i);
  }

  // Sort relocations by offset for more efficient searching for
  // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
  if (ctx.arg.emachine == EM_RISCV ||
      (ctx.arg.emachine == EM_PPC64 && sec->name == ".toc"))
    llvm::stable_sort(sec->relocs(),
                      [](const Relocation &lhs, const Relocation &rhs) {
                        return lhs.offset < rhs.offset;
                      });
}

// Prepare the scanner state for section `s` and scan with the relocation
// encoding the section actually uses (CREL, REL, or RELA).
template <class ELFT>
void RelocationScanner::scanSection(InputSectionBase &s, bool isEH) {
  sec = &s;
  getter = OffsetGetter(s);
  const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>(!isEH);
  if (rels.areRelocsCrel())
    scan<ELFT>(rels.crels);
  else if (rels.areRelocsRel())
    scan<ELFT>(rels.rels);
  else
    scan<ELFT>(rels.relas);
}

template <class ELFT> void elf::scanRelocations(Ctx &ctx) {
  // Scan all relocations. Each relocation goes through a series of tests to
  // determine if it needs special treatment, such as creating GOT, PLT,
  // copy relocations, etc. Note that relocations for non-alloc sections are
  // directly processed by InputSection::relocateNonAlloc.

  // Deterministic parallelism needs sorting relocations which is unsuitable
  // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable
  // for parallelism.
  bool serial = !ctx.arg.zCombreloc || ctx.arg.emachine == EM_MIPS ||
                ctx.arg.emachine == EM_PPC64;
  parallel::TaskGroup tg;
  auto outerFn = [&]() {
    for (ELFFileBase *f : ctx.objectFiles) {
      auto fn = [f, &ctx]() {
        RelocationScanner scanner(ctx);
        for (InputSectionBase *s : f->getSections()) {
          // Skip dead/non-alloc sections; ARM exidx sections are handled
          // separately below via the partition's exidxSections.
          if (s && s->kind() == SectionBase::Regular && s->isLive() &&
              (s->flags & SHF_ALLOC) &&
              !(s->type == SHT_ARM_EXIDX && ctx.arg.emachine == EM_ARM))
            scanner.template scanSection<ELFT>(*s);
        }
      };
      if (serial)
        fn();
      else
        tg.spawn(fn);
    }
    auto scanEH = [&] {
      RelocationScanner scanner(ctx);
      for (Partition &part : ctx.partitions) {
        for (EhInputSection *sec : part.ehFrame->sections)
          scanner.template scanSection<ELFT>(*sec, /*isEH=*/true);
        if (part.armExidx && part.armExidx->isLive())
          for (InputSection *sec : part.armExidx->exidxSections)
            if (sec->isLive())
              scanner.template scanSection<ELFT>(*sec);
      }
    };
    if (serial)
      scanEH();
    else
      tg.spawn(scanEH);
  };
  // If `serial` is true, call `spawn` to ensure that `scanner` runs in a thread
  // with valid getThreadIndex().
  if (serial)
    tg.spawn(outerFn);
  else
    outerFn();
}

RelocationBaseSection &elf::getIRelativeSection(Ctx &ctx) {
  // Prior to Android V, there was a bug that caused RELR relocations to be
  // applied after packed relocations.
  // This meant that resolvers referenced by
  // IRELATIVE relocations in the packed relocation section would read
  // unrelocated globals with RELR relocations when
  // --pack-relative-relocs=android+relr is enabled. Work around this by placing
  // IRELATIVE in .rela.plt.
  return ctx.arg.androidPackDynRelocs ? *ctx.in.relaPlt
                                      : *ctx.mainPart->relaDyn;
}

static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) {
  // Handle a reference to a non-preemptible ifunc. These are special in a
  // few ways:
  //
  // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
  //   a fixed value. But assuming that all references to the ifunc are
  //   GOT-generating or PLT-generating, the handling of an ifunc is
  //   relatively straightforward. We create a PLT entry in Iplt, which is
  //   usually at the end of .plt, which makes an indirect call using a
  //   matching GOT entry in igotPlt, which is usually at the end of .got.plt.
  //   The GOT entry is relocated using an IRELATIVE relocation in relaDyn,
  //   which is usually at the end of .rela.dyn.
  //
  // - Despite the fact that an ifunc does not have a fixed value, compilers
  //   that are not passed -fPIC will assume that they do, and will emit
  //   direct (non-GOT-generating, non-PLT-generating) relocations to the
  //   symbol. This means that if a direct relocation to the symbol is
  //   seen, the linker must set a value for the symbol, and this value must
  //   be consistent no matter what type of reference is made to the symbol.
  //   This can be done by creating a PLT entry for the symbol in the way
  //   described above and making it canonical, that is, making all references
  //   point to the PLT entry instead of the resolver. In lld we also store
  //   the address of the PLT entry in the dynamic symbol table, which means
  //   that the symbol will also have the same value in other modules.
  //   Because the value loaded from the GOT needs to be consistent with
  //   the value computed using a direct relocation, a non-preemptible ifunc
  //   may end up with two GOT entries, one in .got.plt that points to the
  //   address returned by the resolver and is used only by the PLT entry,
  //   and another in .got that points to the PLT entry and is used by
  //   GOT-generating relocations.
  //
  // - The fact that these symbols do not have a fixed value makes them an
  //   exception to the general rule that a statically linked executable does
  //   not require any form of dynamic relocation. To handle these relocations
  //   correctly, the IRELATIVE relocations are stored in an array which a
  //   statically linked executable's startup code must enumerate using the
  //   linker-defined symbols __rela?_iplt_{start,end}.
  if (!sym.isGnuIFunc() || sym.isPreemptible || ctx.arg.zIfuncNoplt)
    return false;
  // Skip unreferenced non-preemptible ifunc.
  if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC)))
    return true;

  sym.isInIplt = true;

  // Create an Iplt and the associated IRELATIVE relocation pointing to the
  // original section/value pairs. For non-GOT non-PLT relocation case below, we
  // may alter section/value, so create a copy of the symbol to make
  // section/value fixed.
  auto *directSym = makeDefined(cast<Defined>(sym));
  directSym->allocateAux(ctx);
  auto &dyn = getIRelativeSection(ctx);
  addPltEntry(ctx, *ctx.in.iplt, *ctx.in.igotPlt, dyn, ctx.target->iRelativeRel,
              *directSym);
  sym.allocateAux(ctx);
  // The original symbol shares the PLT entry that was created for the copy.
  ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;

  if (flags & HAS_DIRECT_RELOC) {
    // Change the value to the IPLT and redirect all references to it.
    auto &d = cast<Defined>(sym);
    d.section = ctx.in.iplt.get();
    d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize;
    d.size = 0;
    // It's important to set the symbol type here so that dynamic loaders
    // don't try to call the PLT as if it were an ifunc resolver.
    d.type = STT_FUNC;

    if (flags & NEEDS_GOT) {
      assert(!(flags & NEEDS_GOT_AUTH) &&
             "R_AARCH64_AUTH_IRELATIVE is not supported yet");
      addGotEntry(ctx, sym);
    }
  } else if (flags & NEEDS_GOT) {
    // Redirect GOT accesses to point to the Igot.
    sym.gotInIgot = true;
  }
  return true;
}

void elf::postScanRelocations(Ctx &ctx) {
  // Per-symbol worker: materialize the GOT/PLT/copy-relocation/TLS entries
  // that the scan phase requested via the symbol's flags.
  auto fn = [&](Symbol &sym) {
    auto flags = sym.flags.load(std::memory_order_relaxed);
    if (handleNonPreemptibleIfunc(ctx, sym, flags))
      return;

    if (sym.isTagged() && sym.isDefined())
      ctx.mainPart->memtagGlobalDescriptors->addSymbol(sym);

    if (!sym.needsDynReloc())
      return;
    sym.allocateAux(ctx);

    if (flags & NEEDS_GOT) {
      if ((flags & NEEDS_GOT_AUTH) && (flags & NEEDS_GOT_NONAUTH)) {
        auto diag = Err(ctx);
        diag << "both AUTH and non-AUTH GOT entries for '" << sym.getName()
             << "' requested, but only one type of GOT entry per symbol is "
                "supported";
        return;
      }
      if (flags & NEEDS_GOT_AUTH)
        addGotAuthEntry(ctx, sym);
      else
        addGotEntry(ctx, sym);
    }
    if (flags & NEEDS_PLT)
      addPltEntry(ctx, *ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
                  ctx.target->pltRel, sym);
    if (flags & NEEDS_COPY) {
      if (sym.isObject()) {
        invokeELFT(addCopyRelSymbol, ctx, cast<SharedSymbol>(sym));
        // NEEDS_COPY is cleared for sym and its aliases so that in
        // later iterations aliases won't cause redundant copies.
        assert(!sym.hasFlag(NEEDS_COPY));
      } else {
        // A canonical PLT entry: an undefined function referenced directly
        // from a non-PIC executable gets a definition at its PLT slot.
        assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
        if (!sym.isDefined()) {
          replaceWithDefined(ctx, sym, *ctx.in.plt,
                             ctx.target->pltHeaderSize +
                                 ctx.target->pltEntrySize * sym.getPltIdx(ctx),
                             0);
          sym.setFlags(NEEDS_COPY);
          if (ctx.arg.emachine == EM_PPC) {
            // PPC32 canonical PLT entries are at the beginning of .glink
            cast<Defined>(sym).value = ctx.in.plt->headerSize;
            ctx.in.plt->headerSize += 16;
            cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts.push_back(&sym);
          }
        }
      }
    }

    // Everything below handles TLS-model-specific GOT entries and dynamic
    // relocations.
    if (!sym.isTls())
      return;
    bool isLocalInExecutable = !sym.isPreemptible && !ctx.arg.shared;
    GotSection *got = ctx.in.got.get();

    if (flags & NEEDS_TLSDESC) {
      if ((flags & NEEDS_TLSDESC_AUTH) && (flags & NEEDS_TLSDESC_NONAUTH)) {
        Err(ctx)
            << "both AUTH and non-AUTH TLSDESC entries for '" << sym.getName()
            << "' requested, but only one type of TLSDESC entry per symbol is "
               "supported";
        return;
      }
      got->addTlsDescEntry(sym);
      RelType tlsDescRel = ctx.target->tlsDescRel;
      if (flags & NEEDS_TLSDESC_AUTH) {
        got->addTlsDescAuthEntry();
        tlsDescRel = ELF::R_AARCH64_AUTH_TLSDESC;
      }
      ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
          tlsDescRel, *got, got->getTlsDescOffset(sym), sym, tlsDescRel);
    }
    if (flags & NEEDS_TLSGD) {
      got->addDynTlsEntry(sym);
      uint64_t off = got->getGlobalDynOffset(sym);
      if (isLocalInExecutable)
        // Write one to the GOT slot.
        got->addConstant({R_ADDEND, ctx.target->symbolicRel, off, 1, &sym});
      else
        ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsModuleIndexRel,
                                              *got, off, sym);

      // If the symbol is preemptible we need the dynamic linker to write
      // the offset too.
      uint64_t offsetOff = off + ctx.arg.wordsize;
      if (sym.isPreemptible)
        ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsOffsetRel, *got,
                                              offsetOff, sym);
      else
        got->addConstant({R_ABS, ctx.target->tlsOffsetRel, offsetOff, 0, &sym});
    }
    if (flags & NEEDS_TLSGD_TO_IE) {
      got->addEntry(sym);
      ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsGotRel, *got,
                                            sym.getGotOffset(ctx), sym);
    }
    if (flags & NEEDS_GOT_DTPREL) {
      got->addEntry(sym);
      got->addConstant(
          {R_ABS, ctx.target->tlsOffsetRel, sym.getGotOffset(ctx), 0, &sym});
    }

    if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
      addTpOffsetGotEntry(ctx, sym);
  };

  // Shared TLS index slot used when any local-dynamic sequence was seen
  // (ctx.needsTlsLd is set during scanning).
  GotSection *got = ctx.in.got.get();
  if (ctx.needsTlsLd.load(std::memory_order_relaxed) && got->addTlsIndex()) {
    static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
    if (ctx.arg.shared)
      ctx.mainPart->relaDyn->addReloc(
          {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
    else
      got->addConstant({R_ADDEND, ctx.target->symbolicRel,
                        got->getTlsIndexOff(), 1, &dummy});
  }

  assert(ctx.symAux.size() == 1);
  for (Symbol *sym : ctx.symtab->getSymbols())
    fn(*sym);

  // Local symbols may need the aforementioned non-preemptible ifunc and GOT
  // handling. They don't need regular PLT.
  for (ELFFileBase *file : ctx.objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
      fn(*sym);
}

static bool mergeCmp(const InputSection *a, const InputSection *b) {
  // std::merge requires a strict weak ordering.
  if (a->outSecOff < b->outSecOff)
    return true;

  // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection.
  if (a->outSecOff == b->outSecOff && a != b) {
    auto *ta = dyn_cast<ThunkSection>(a);
    auto *tb = dyn_cast<ThunkSection>(b);

    // Check if Thunk is immediately before any specific Target
    // InputSection for example Mips LA25 Thunks.
    if (ta && ta->getTargetInputSection() == b)
      return true;

    // Place Thunk Sections without specific targets before
    // non-Thunk Sections.
    if (ta && !tb && !ta->getTargetInputSection())
      return true;
  }

  return false;
}

// Call Fn on every executable InputSection accessed via the linker script
// InputSectionDescription::Sections.
static void forEachInputSectionDescription(
    ArrayRef<OutputSection *> outputSections,
    llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) {
  for (OutputSection *os : outputSections) {
    // Only allocated, executable output sections can contain thunked code.
    if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR))
      continue;
    for (SectionCommand *bc : os->commands)
      if (auto *isd = dyn_cast<InputSectionDescription>(bc))
        fn(os, isd);
  }
}

ThunkCreator::ThunkCreator(Ctx &ctx) : ctx(ctx) {}

ThunkCreator::~ThunkCreator() {}

// Thunk Implementation
//
// Thunks (sometimes called stubs, veneers or branch islands) are small pieces
// of code that the linker inserts in between a caller and a callee. The thunks
// are added at link time rather than compile time as the decision on whether
// a thunk is needed, such as the caller and callee being out of range, can only
// be made at link time.
//
// It is straightforward to tell given the current state of the program when a
// thunk is needed for a particular call. The more difficult part is that
// the thunk needs to be placed in the program such that the caller can reach
// the thunk and the thunk can reach the callee; furthermore, adding thunks to
// the program alters addresses, which can mean more thunks etc.
//
// In lld we have a synthetic ThunkSection that can hold many Thunks.
// The decision to have a ThunkSection act as a container means that we can
// more easily handle the most common case of a single block of contiguous
// Thunks by inserting just a single ThunkSection.
//
// The implementation of Thunks in lld is split across these areas
// Relocations.cpp : Framework for creating and placing thunks
// Thunks.cpp : The code generated for each supported thunk
// Target.cpp : Target specific hooks that the framework uses to decide when
//              a thunk is used
// Synthetic.cpp : Implementation of ThunkSection
// Writer.cpp : Iteratively call framework until no more Thunks added
//
// Thunk placement requirements:
// Mips LA25 thunks. These must be placed immediately before the callee section.
// We can assume that the caller is in range of the Thunk. These are modelled
// by Thunks that return the section they must precede with
// getTargetInputSection().
//
// ARM interworking and range extension thunks. These thunks must be placed
// within range of the caller. All implemented ARM thunks can always reach the
// callee as they use an indirect jump via a register that has no range
// restrictions.
//
// Thunk placement algorithm:
// For Mips LA25 ThunkSections; the placement is explicit, it has to be before
// getTargetInputSection().
//
// For thunks that must be placed within range of the caller there are many
// possible choices given that the maximum range from the caller is usually
// much larger than the average InputSection size. Desirable properties include:
// - Maximize reuse of thunks by multiple callers
// - Minimize number of ThunkSections to simplify insertion
// - Handle impact of already added Thunks on addresses
// - Simple to understand and implement
//
// In lld for the first pass, we pre-create one or more ThunkSections per
// InputSectionDescription at Target specific intervals. A ThunkSection is
// placed so that the estimated end of the ThunkSection is within range of the
// start of the InputSectionDescription or the previous ThunkSection. For
// example:
// InputSectionDescription
// Section 0
// ...
// Section N
// ThunkSection 0
// Section N + 1
// ...
// Section N + K
// Thunk Section 1
//
// The intention is that we can add a Thunk to a ThunkSection that is well
// spaced enough to service a number of callers without having to do a lot
// of work. An important principle is that it is not an error if a Thunk cannot
// be placed in a pre-created ThunkSection; when this happens we create a new
// ThunkSection placed next to the caller. This allows us to handle the vast
// majority of thunks simply, but also handle rare cases where the branch range
// is smaller than the target specific spacing.
//
// The algorithm is expected to create all the thunks that are needed in a
// single pass, with a small number of programs needing a second pass due to
// the insertion of thunks in the first pass increasing the offset between
// callers and callees that were only just in range.
//
// A consequence of allowing new ThunkSections to be created outside of the
// pre-created ThunkSections is that in rare cases calls to Thunks that were in
// range in pass K, are out of range in some pass > K due to the insertion of
// more Thunks in between the caller and callee. When this happens we retarget
// the relocation back to the original target and create another Thunk.

// Remove ThunkSections that are empty, this should only be the initial set
// precreated on pass 0.

// Insert the Thunks for OutputSection OS into their designated place
// in the Sections vector, and recalculate the InputSection output section
// offsets.
// This may invalidate any output section offsets stored outside of InputSection
void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) {
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->thunkSections.empty())
          return;

        // Remove any zero sized precreated Thunks.
        llvm::erase_if(isd->thunkSections,
                       [](const std::pair<ThunkSection *, uint32_t> &ts) {
                         return ts.first->getSize() == 0;
                       });

        // ISD->ThunkSections contains all created ThunkSections, including
        // those inserted in previous passes. Extract the Thunks created this
        // pass and order them in ascending outSecOff.
2102 std::vector<ThunkSection *> newThunks; 2103 for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections) 2104 if (ts.second == pass) 2105 newThunks.push_back(ts.first); 2106 llvm::stable_sort(newThunks, 2107 [](const ThunkSection *a, const ThunkSection *b) { 2108 return a->outSecOff < b->outSecOff; 2109 }); 2110 2111 // Merge sorted vectors of Thunks and InputSections by outSecOff 2112 SmallVector<InputSection *, 0> tmp; 2113 tmp.reserve(isd->sections.size() + newThunks.size()); 2114 2115 std::merge(isd->sections.begin(), isd->sections.end(), 2116 newThunks.begin(), newThunks.end(), std::back_inserter(tmp), 2117 mergeCmp); 2118 2119 isd->sections = std::move(tmp); 2120 }); 2121 } 2122 2123 static int64_t getPCBias(Ctx &ctx, RelType type) { 2124 if (ctx.arg.emachine != EM_ARM) 2125 return 0; 2126 switch (type) { 2127 case R_ARM_THM_JUMP19: 2128 case R_ARM_THM_JUMP24: 2129 case R_ARM_THM_CALL: 2130 return 4; 2131 default: 2132 return 8; 2133 } 2134 } 2135 2136 // Find or create a ThunkSection within the InputSectionDescription (ISD) that 2137 // is in range of Src. An ISD maps to a range of InputSections described by a 2138 // linker script section pattern such as { .text .text.* }. 2139 ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os, 2140 InputSection *isec, 2141 InputSectionDescription *isd, 2142 const Relocation &rel, 2143 uint64_t src) { 2144 // See the comment in getThunk for -pcBias below. 2145 const int64_t pcBias = getPCBias(ctx, rel.type); 2146 for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) { 2147 ThunkSection *ts = tp.first; 2148 uint64_t tsBase = os->addr + ts->outSecOff - pcBias; 2149 uint64_t tsLimit = tsBase + ts->getSize(); 2150 if (ctx.target->inBranchRange(rel.type, src, 2151 (src > tsLimit) ? tsBase : tsLimit)) 2152 return ts; 2153 } 2154 2155 // No suitable ThunkSection exists. 
This can happen when there is a branch 2156 // with lower range than the ThunkSection spacing or when there are too 2157 // many Thunks. Create a new ThunkSection as close to the InputSection as 2158 // possible. Error if InputSection is so large we cannot place ThunkSection 2159 // anywhere in Range. 2160 uint64_t thunkSecOff = isec->outSecOff; 2161 if (!ctx.target->inBranchRange(rel.type, src, 2162 os->addr + thunkSecOff + rel.addend)) { 2163 thunkSecOff = isec->outSecOff + isec->getSize(); 2164 if (!ctx.target->inBranchRange(rel.type, src, 2165 os->addr + thunkSecOff + rel.addend)) 2166 Fatal(ctx) << "InputSection too large for range extension thunk " 2167 << isec->getObjMsg(src - (os->addr << isec->outSecOff)); 2168 } 2169 return addThunkSection(os, isd, thunkSecOff); 2170 } 2171 2172 // Add a Thunk that needs to be placed in a ThunkSection that immediately 2173 // precedes its Target. 2174 ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) { 2175 ThunkSection *ts = thunkedSections.lookup(isec); 2176 if (ts) 2177 return ts; 2178 2179 // Find InputSectionRange within Target Output Section (TOS) that the 2180 // InputSection (IS) that we need to precede is in. 2181 OutputSection *tos = isec->getParent(); 2182 for (SectionCommand *bc : tos->commands) { 2183 auto *isd = dyn_cast<InputSectionDescription>(bc); 2184 if (!isd || isd->sections.empty()) 2185 continue; 2186 2187 InputSection *first = isd->sections.front(); 2188 InputSection *last = isd->sections.back(); 2189 2190 if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff) 2191 continue; 2192 2193 ts = addThunkSection(tos, isd, isec->outSecOff); 2194 thunkedSections[isec] = ts; 2195 return ts; 2196 } 2197 2198 return nullptr; 2199 } 2200 2201 // Create one or more ThunkSections per OS that can be used to place Thunks. 
// We attempt to place the ThunkSections using the following desirable
// properties:
// - Within range of the maximum number of callers
// - Minimise the number of ThunkSections
//
// We follow a simple but conservative heuristic to place ThunkSections at
// offsets that are multiples of a Target specific branch range.
// For an InputSectionDescription that is smaller than the range, a single
// ThunkSection at the end of the range will do.
//
// For an InputSectionDescription that is more than twice the size of the range,
// we place the last ThunkSection at range bytes from the end of the
// InputSectionDescription in order to increase the likelihood that the
// distance from a thunk to its target will be sufficiently small to
// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> outputSections) {
  uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        if (isd->sections.empty())
          return;

        uint32_t isdBegin = isd->sections.front()->outSecOff;
        uint32_t isdEnd =
            isd->sections.back()->outSecOff + isd->sections.back()->getSize();
        // Lower bound for the offset of the final ThunkSection; -1 (max
        // uint32_t) means "no bound" for small InputSectionDescriptions.
        uint32_t lastThunkLowerBound = -1;
        if (isdEnd - isdBegin > thunkSectionSpacing * 2)
          lastThunkLowerBound = isdEnd - thunkSectionSpacing;

        uint32_t isecLimit;
        uint32_t prevIsecLimit = isdBegin;
        uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;

        for (const InputSection *isec : isd->sections) {
          isecLimit = isec->outSecOff + isec->getSize();
          // Once a section crosses the spacing boundary, place a ThunkSection
          // after the previous section's end.
          if (isecLimit > thunkUpperBound) {
            addThunkSection(os, isd, prevIsecLimit);
            thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
          }
          if (isecLimit > lastThunkLowerBound)
            break;
          prevIsecLimit = isecLimit;
        }
        addThunkSection(os, isd, isecLimit);
      });
}

ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
                                            InputSectionDescription *isd,
                                            uint64_t off) {
  auto *ts = make<ThunkSection>(ctx, os, off);
  ts->partition = os->partition;
  if ((ctx.arg.fixCortexA53Errata843419 || ctx.arg.fixCortexA8) &&
      !isd->sections.empty()) {
    // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
    // thunks we disturb the base addresses of sections placed after the thunks
    // this makes patches we have generated redundant, and may cause us to
    // generate more patches as different instructions are now in sensitive
    // locations. When we generate more patches we may force more branches to
    // go out of range, causing more thunks to be generated. In pathological
    // cases this can cause the address dependent content pass not to converge.
    // We fix this by rounding up the size of the ThunkSection to 4KiB, this
    // limits the insertion of a ThunkSection on the addresses modulo 4 KiB,
    // which means that adding Thunks to the section does not invalidate
    // errata patches for following code.
    // Rounding up the size to 4KiB has consequences for code-size and can
    // trip up linker script defined assertions. For example the linux kernel
    // has an assertion that what LLD represents as an InputSectionDescription
    // does not exceed 4 KiB even if the overall OutputSection is > 128 Mib.
    // We use the heuristic of rounding up the size when both of the following
    // conditions are true:
    // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
    //     accounts for the case where no single InputSectionDescription is
    //     larger than the OutputSection size. This is conservative but simple.
    // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
    //     any assertion failures that an InputSectionDescription is < 4 KiB
    //     in size.
    uint64_t isdSize = isd->sections.back()->outSecOff +
                       isd->sections.back()->getSize() -
                       isd->sections.front()->outSecOff;
    if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
      ts->roundUpSizeForErrata = true;
  }
  // Remember which pass created this section so mergeThunks can pick out the
  // sections created in the current pass.
  isd->thunkSections.push_back({ts, pass});
  return ts;
}

static bool isThunkSectionCompatible(InputSection *source,
                                     SectionBase *target) {
  // We can't reuse thunks in different loadable partitions because they might
  // not be loaded. But partition 1 (the main partition) will always be loaded.
  if (source->partition != target->partition)
    return target->partition == 1;
  return true;
}

std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
                                                Relocation &rel, uint64_t src) {
  SmallVector<std::unique_ptr<Thunk>, 0> *thunkVec = nullptr;
  // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled
  // out in the relocation addend. We compensate for the PC bias so that
  // an Arm and Thumb relocation to the same destination get the same keyAddend,
  // which is usually 0.
  const int64_t pcBias = getPCBias(ctx, rel.type);
  const int64_t keyAddend = rel.addend + pcBias;

  // We use a ((section, offset), addend) pair to find the thunk position if
  // possible so that we create only one thunk for aliased symbols or ICFed
  // sections. There may be multiple relocations sharing the same (section,
  // offset + addend) pair. We may revert the relocation back to its original
  // non-Thunk target, so we cannot fold offset + addend.
  if (auto *d = dyn_cast<Defined>(rel.sym))
    if (!d->isInPlt(ctx) && d->section)
      thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
                                                    keyAddend}];
  if (!thunkVec)
    thunkVec = &thunkedSymbols[{rel.sym, keyAddend}];

  // Check existing Thunks for Sym to see if they can be reused
  for (auto &t : *thunkVec)
    if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) &&
        t->isCompatibleWith(*isec, rel) &&
        ctx.target->inBranchRange(rel.type, src,
                                  t->getThunkTargetSym()->getVA(ctx, -pcBias)))
      return std::make_pair(t.get(), false);

  // No existing compatible Thunk in range, create a new one
  thunkVec->push_back(addThunk(ctx, *isec, rel));
  return std::make_pair(thunkVec->back().get(), true);
}

// Find or create a landing pad thunk for (d.section, d.value, addend).
// Returns the thunk and whether it was newly created.
std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d,
                                                              int64_t a) {
  auto [it, isNew] = landingPadsBySectionAndAddend.try_emplace(
      {{d.section, d.value}, a}, nullptr);
  if (isNew)
    it->second = addLandingPadThunk(ctx, d, a);
  return {it->second.get(), isNew};
}

// Return true if the relocation target is an in range Thunk.
// Return false if the relocation is not to a Thunk. If the relocation target
// was originally to a Thunk, but is no longer in range we revert the
// relocation back to its original non-Thunk target.
bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
  if (Thunk *t = thunks.lookup(rel.sym)) {
    if (ctx.target->inBranchRange(rel.type, src,
                                  rel.sym->getVA(ctx, rel.addend)))
      return true;
    // Out of range: restore the original destination (and addend) so a new
    // Thunk can be created for it.
    rel.sym = &t->destination;
    rel.addend = t->addend;
    if (rel.sym->isInPlt(ctx))
      rel.expr = toPlt(rel.expr);
  }
  return false;
}

// When indirect branches are restricted, such as with AArch64 BTI, Thunks may
// need to target a linker generated landing pad instead of the target. This
// needs to be done once per pass as the need for a BTI thunk is dependent on
// whether a thunk is short or long. We iterate over all the thunks to make sure
// we catch thunks that have been created but are no longer live. Non-live
// thunks are not reachable via normalizeExistingThunk() but are still written.
bool ThunkCreator::addSyntheticLandingPads() {
  bool addressesChanged = false;
  for (Thunk *t : allThunks) {
    if (!t->needsSyntheticLandingPad())
      continue;
    Thunk *lpt;
    bool isNew;
    auto &dr = cast<Defined>(t->destination);
    std::tie(lpt, isNew) = getSyntheticLandingPad(dr, t->addend);
    if (isNew) {
      addressesChanged = true;
      getISThunkSec(cast<InputSection>(dr.section))->addThunk(lpt);
    }
    t->landingPad = lpt->getThunkTargetSym();
  }
  return addressesChanged;
}

// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// PreConditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// PostConditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the information
// available to createThunks() on entry have been redirected to a Thunk. Note
// that adding Thunks changes offsets between caller and callee so more Thunks
// may be required.
//
// If return value is false then no more Thunks are needed, and createThunks has
// made no changes. If the target requires range extension thunks, currently
// ARM, then any future change in offset between caller and callee risks a
// relocation out of range error.
bool ThunkCreator::createThunks(uint32_t pass,
                                ArrayRef<OutputSection *> outputSections) {
  this->pass = pass;
  bool addressesChanged = false;

  // On the first pass, pre-create ThunkSections at target-defined intervals so
  // that later passes always have a candidate section within branch range.
  if (pass == 0 && ctx.target->getThunkSectionSpacing())
    createInitialThunkSections(outputSections);

  if (ctx.arg.emachine == EM_AARCH64)
    addressesChanged = addSyntheticLandingPads();

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
        for (InputSection *isec : isd->sections)
          for (Relocation &rel : isec->relocs()) {
            uint64_t src = isec->getVA(rel.offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (pass > 0 && normalizeExistingThunk(rel, src))
              continue;

            if (!ctx.target->needsThunk(rel.expr, rel.type, isec->file, src,
                                        *rel.sym, rel.addend))
              continue;

            Thunk *t;
            bool isNew;
            std::tie(t, isNew) = getThunk(isec, rel, src);

            if (isNew) {
              // Find or create a ThunkSection for the new Thunk
              ThunkSection *ts;
              if (auto *tis = t->getTargetInputSection())
                ts = getISThunkSec(tis);
              else
                ts = getISDThunkSec(os, isec, isd, rel, src);
              ts->addThunk(t);
              thunks[t->getThunkTargetSym()] = t;
              allThunks.push_back(t);
            }

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            rel.sym = t->getThunkTargetSym();
            rel.expr = fromPlt(rel.expr);

            // On AArch64 and PPC, a jump/call relocation may be encoded as
            // STT_SECTION + non-zero addend, clear the addend after
            // redirection.
            if (ctx.arg.emachine != EM_MIPS)
              rel.addend = -getPCBias(ctx, rel.type);
          }

        // Growing a ThunkSection changes addresses, forcing another pass.
        for (auto &p : isd->thunkSections)
          addressesChanged |= p.first->assignOffsets();
      });

  for (auto &p : thunkedSections)
    addressesChanged |= p.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(outputSections);
  return addressesChanged;
}

// The following aid in the conversion of call x@GDPLT to call __tls_get_addr
// hexagonNeedsTLSSymbol scans for relocations that would require a call to
// __tls_get_addr.
// hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr.
2484 bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) { 2485 bool needTlsSymbol = false; 2486 forEachInputSectionDescription( 2487 outputSections, [&](OutputSection *os, InputSectionDescription *isd) { 2488 for (InputSection *isec : isd->sections) 2489 for (Relocation &rel : isec->relocs()) 2490 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { 2491 needTlsSymbol = true; 2492 return; 2493 } 2494 }); 2495 return needTlsSymbol; 2496 } 2497 2498 void elf::hexagonTLSSymbolUpdate(Ctx &ctx) { 2499 Symbol *sym = ctx.symtab->find("__tls_get_addr"); 2500 if (!sym) 2501 return; 2502 bool needEntry = true; 2503 forEachInputSectionDescription( 2504 ctx.outputSections, [&](OutputSection *os, InputSectionDescription *isd) { 2505 for (InputSection *isec : isd->sections) 2506 for (Relocation &rel : isec->relocs()) 2507 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { 2508 if (needEntry) { 2509 sym->allocateAux(ctx); 2510 addPltEntry(ctx, *ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt, 2511 ctx.target->pltRel, *sym); 2512 needEntry = false; 2513 } 2514 rel.sym = sym; 2515 } 2516 }); 2517 } 2518 2519 static bool matchesRefTo(const NoCrossRefCommand &cmd, StringRef osec) { 2520 if (cmd.toFirst) 2521 return cmd.outputSections[0] == osec; 2522 return llvm::is_contained(cmd.outputSections, osec); 2523 } 2524 2525 template <class ELFT, class Rels> 2526 static void scanCrossRefs(Ctx &ctx, const NoCrossRefCommand &cmd, 2527 OutputSection *osec, InputSection *sec, Rels rels) { 2528 for (const auto &r : rels) { 2529 Symbol &sym = sec->file->getSymbol(r.getSymbol(ctx.arg.isMips64EL)); 2530 // A legal cross-reference is when the destination output section is 2531 // nullptr, osec for a self-reference, or a section that is described by the 2532 // NOCROSSREFS/NOCROSSREFS_TO command. 
2533 auto *dstOsec = sym.getOutputSection(); 2534 if (!dstOsec || dstOsec == osec || !matchesRefTo(cmd, dstOsec->name)) 2535 continue; 2536 2537 std::string toSymName; 2538 if (!sym.isSection()) 2539 toSymName = toStr(ctx, sym); 2540 else if (auto *d = dyn_cast<Defined>(&sym)) 2541 toSymName = d->section->name; 2542 Err(ctx) << sec->getLocation(r.r_offset) 2543 << ": prohibited cross reference from '" << osec->name << "' to '" 2544 << toSymName << "' in '" << dstOsec->name << "'"; 2545 } 2546 } 2547 2548 // For each output section described by at least one NOCROSSREFS(_TO) command, 2549 // scan relocations from its input sections for prohibited cross references. 2550 template <class ELFT> void elf::checkNoCrossRefs(Ctx &ctx) { 2551 for (OutputSection *osec : ctx.outputSections) { 2552 for (const NoCrossRefCommand &noxref : ctx.script->noCrossRefs) { 2553 if (!llvm::is_contained(noxref.outputSections, osec->name) || 2554 (noxref.toFirst && noxref.outputSections[0] == osec->name)) 2555 continue; 2556 for (SectionCommand *cmd : osec->commands) { 2557 auto *isd = dyn_cast<InputSectionDescription>(cmd); 2558 if (!isd) 2559 continue; 2560 parallelForEach(isd->sections, [&](InputSection *sec) { 2561 invokeOnRelocs(*sec, scanCrossRefs<ELFT>, ctx, noxref, osec, sec); 2562 }); 2563 } 2564 } 2565 } 2566 } 2567 2568 template void elf::scanRelocations<ELF32LE>(Ctx &); 2569 template void elf::scanRelocations<ELF32BE>(Ctx &); 2570 template void elf::scanRelocations<ELF64LE>(Ctx &); 2571 template void elf::scanRelocations<ELF64BE>(Ctx &); 2572 2573 template void elf::checkNoCrossRefs<ELF32LE>(Ctx &); 2574 template void elf::checkNoCrossRefs<ELF32BE>(Ctx &); 2575 template void elf::checkNoCrossRefs<ELF64LE>(Ctx &); 2576 template void elf::checkNoCrossRefs<ELF64BE>(Ctx &); 2577