//===- ConcatOutputSection.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ConcatOutputSection.h"
#include "Config.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TimeProfiler.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;

// Registry of all concatenated output sections, keyed by
// (segment name, section name).
MapVector<NamePair, ConcatOutputSection *> macho::concatOutputSections;

// Add one input section to this output section. The first input establishes
// the output section's alignment and flags; subsequent inputs can only raise
// the alignment and merge flags via finalizeFlags().
void ConcatOutputSection::addInput(ConcatInputSection *input) {
  assert(input->parent == this);
  if (inputs.empty()) {
    align = input->align;
    flags = input->getFlags();
  } else {
    align = std::max(align, input->align);
    finalizeFlags(input);
  }
  inputs.push_back(input);
}

// Branch-range extension can be implemented in two ways, either through ...
//
// (1) Branch islands: Single branch instructions (also of limited range),
//     that might be chained in multiple hops to reach the desired
//     destination. On ARM64, as many as 16 branch islands are needed to hop
//     between opposite ends of a 2 GiB program. LD64 uses branch islands
//     exclusively, even when it needs excessive hops.
//
// (2) Thunks: Instruction(s) to load the destination address into a scratch
//     register, followed by a register-indirect branch. Thunks are
//     constructed to reach any arbitrary address, so need not be
//     chained. Although thunks need not be chained, a program might need
//     multiple thunks to the same destination distributed throughout a large
//     program so that all call sites can have one within range.
//
// The optimal approach is to mix islands for destinations within two hops,
// and use thunks for destinations at greater distance. For now, we only
// implement thunks. TODO: Add support for branch islands.
//
// Internally -- as expressed in LLD's data structures -- a
// branch-range-extension thunk consists of:
//
// (1) new Defined symbol for the thunk named
//     <FUNCTION>.thunk.<SEQUENCE>, which references ...
// (2) new InputSection, which contains ...
// (3.1) new data for the instructions to load & branch to the far address +
// (3.2) new Relocs on instructions to load the far address, which reference ...
// (4.1) existing Defined symbol for the real function in __text, or
// (4.2) existing DylibSymbol for the real function in a dylib
//
// Nearly-optimal thunk-placement algorithm features:
//
// * Single pass: O(n) on the number of call sites.
//
// * Accounts for the exact space overhead of thunks - no heuristics
//
// * Exploits the full range of call instructions - forward & backward
//
// Data:
//
// * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
//   to its thunk bookkeeper.
//
// * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
//   distant call sites might be unable to reach the same thunk, so multiple
//   thunks are necessary to serve all call sites in a very large program. A
//   thunkInfo stores state for all thunks associated with a particular
//   function:
//   (a) thunk symbol
//   (b) input section containing stub code, and
//   (c) sequence number for the active thunk incarnation.
//   When an old thunk goes out of range, we increment the sequence number and
//   create a new thunk named <FUNCTION>.thunk.<SEQUENCE>.
//
// * A thunk consists of
//   (a) a Defined symbol pointing to
//   (b) an InputSection holding machine code (similar to a MachO stub), and
//   (c) relocs referencing the real function for fixing up the stub code.
//
// * std::vector<InputSection *> MergedInputSection::thunks: A vector parallel
//   to the inputs vector. We store new thunks via cheap vector append, rather
//   than costly insertion into the inputs vector.
//
// Control Flow:
//
// * During address assignment, MergedInputSection::finalize() examines call
//   sites by ascending address and creates thunks. When a function is beyond
//   the range of a call site, we need a thunk. Place it at the largest
//   available forward address from the call site. Call sites increase
//   monotonically and thunks are always placed as far forward as possible;
//   thus, we place thunks at monotonically increasing addresses. Once a thunk
//   is placed, it and all previous input-section addresses are final.
//
// * ConcatInputSection::finalize() and ConcatInputSection::writeTo() merge
//   the inputs and thunks vectors (both ordered by ascending address), which
//   is simple and cheap.

DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;

// Determine whether we need thunks, which depends on the target arch -- RISC
// (i.e., ARM) generally does because it has limited-range branch/call
// instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
// thunks for programs so large that branch source & destination addresses
// might differ more than the range of branch instruction(s).
124 bool TextOutputSection::needsThunks() const { 125 if (!target->usesThunks()) 126 return false; 127 uint64_t isecAddr = addr; 128 for (ConcatInputSection *isec : inputs) 129 isecAddr = alignToPowerOf2(isecAddr, isec->align) + isec->getSize(); 130 // Other sections besides __text might be small enough to pass this 131 // test but nevertheless need thunks for calling into other sections. 132 // An imperfect heuristic to use in this case is that if a section 133 // we've already processed in this segment needs thunks, so do the 134 // rest. 135 bool needsThunks = parent && parent->needsThunks; 136 if (!needsThunks && 137 isecAddr - addr + in.stubs->getSize() <= 138 std::min(target->backwardBranchRange, target->forwardBranchRange)) 139 return false; 140 // Yes, this program is large enough to need thunks. 141 if (parent) { 142 parent->needsThunks = true; 143 } 144 for (ConcatInputSection *isec : inputs) { 145 for (Reloc &r : isec->relocs) { 146 if (!target->hasAttr(r.type, RelocAttrBits::BRANCH)) 147 continue; 148 auto *sym = cast<Symbol *>(r.referent); 149 // Pre-populate the thunkMap and memoize call site counts for every 150 // InputSection and ThunkInfo. We do this for the benefit of 151 // estimateStubsInRangeVA(). 152 ThunkInfo &thunkInfo = thunkMap[sym]; 153 // Knowing ThunkInfo call site count will help us know whether or not we 154 // might need to create more for this referent at the time we are 155 // estimating distance to __stubs in estimateStubsInRangeVA(). 156 ++thunkInfo.callSiteCount; 157 // We can avoid work on InputSections that have no BRANCH relocs. 158 isec->hasCallSites = true; 159 } 160 } 161 return true; 162 } 163 164 // Since __stubs is placed after __text, we must estimate the address 165 // beyond which stubs are within range of a simple forward branch. 166 // This is called exactly once, when the last input section has been finalized. 
uint64_t TextOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
  // Tally the functions which still have call sites remaining to process,
  // which yields the maximum number of thunks we might yet place.
  size_t maxPotentialThunks = 0;
  for (auto &tp : thunkMap) {
    ThunkInfo &ti = tp.second;
    // This overcounts: Only sections that are in forward jump range from the
    // currently-active section get finalized, and all input sections are
    // finalized when estimateStubsInRangeVA() is called. So only backward
    // jumps will need thunks, but we count all jumps.
    if (ti.callSitesUsed < ti.callSiteCount)
      maxPotentialThunks += 1;
  }
  // Tally the total size of input sections remaining to process.
  uint64_t isecVA = inputs[callIdx]->getVA();
  uint64_t isecEnd = isecVA;
  for (size_t i = callIdx; i < inputs.size(); i++) {
    InputSection *isec = inputs[i];
    // Same alignment-aware accumulation as needsThunks().
    isecEnd = alignToPowerOf2(isecEnd, isec->align) + isec->getSize();
  }

  // Tally up any thunks that have already been placed that have VA higher than
  // inputs[callIdx]. First, find the index of the first thunk that is beyond
  // the current inputs[callIdx]. (thunks are kept in ascending-VA order, so a
  // binary partition_point suffices.)
  auto itPostcallIdxThunks =
      llvm::partition_point(thunks, [isecVA](const ConcatInputSection *t) {
        return t->getVA() <= isecVA;
      });
  uint64_t existingForwardThunks = thunks.end() - itPostcallIdxThunks;

  uint64_t forwardBranchRange = target->forwardBranchRange;
  assert(isecEnd > forwardBranchRange &&
         "should not run thunk insertion if all code fits in jump range");
  assert(isecEnd - isecVA <= forwardBranchRange &&
         "should only finalize sections in jump range");

  // Estimate the maximum size of the code, right before the stubs section.
  uint64_t maxTextSize = 0;
  // Add the size of all the inputs, including the unprocessed ones.
  maxTextSize += isecEnd;

  // Add the size of the thunks that have already been created that are ahead
  // of inputs[callIdx]. These are already created thunks that will be
  // interleaved with inputs[callIdx...end].
  maxTextSize += existingForwardThunks * target->thunkSize;

  // Add the size of the thunks that may be created in the future. Since
  // 'maxPotentialThunks' overcounts, this is an estimate of the upper limit.
  maxTextSize += maxPotentialThunks * target->thunkSize;

  // Estimated maximum VA of last stub.
  uint64_t maxVAOfLastStub = maxTextSize + in.stubs->getSize();

  // Estimate the address after which call sites can safely call stubs
  // directly rather than through intermediary thunks.
  uint64_t stubsInRangeVA = maxVAOfLastStub - forwardBranchRange;

  log("thunks = " + std::to_string(thunkMap.size()) +
      ", potential = " + std::to_string(maxPotentialThunks) +
      ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
      utohexstr(isecVA) + ", threshold = " + utohexstr(stubsInRangeVA) +
      ", isecEnd = " + utohexstr(isecEnd) +
      ", tail = " + utohexstr(isecEnd - isecVA) +
      ", slop = " + utohexstr(forwardBranchRange - (isecEnd - isecVA)));
  return stubsInRangeVA;
}

// Assign isec the next aligned offset in this output section, mark its
// address final, and advance the size/fileSize cursors past it.
void ConcatOutputSection::finalizeOne(ConcatInputSection *isec) {
  size = alignToPowerOf2(size, isec->align);
  fileSize = alignToPowerOf2(fileSize, isec->align);
  isec->outSecOff = size;
  isec->isFinal = true;
  size += isec->getSize();
  fileSize += isec->getFileSize();
}

// Assign final offsets to all inputs, in order. (No thunks here; thunk
// handling lives in TextOutputSection::finalize().)
void ConcatOutputSection::finalizeContents() {
  for (ConcatInputSection *isec : inputs)
    finalizeOne(isec);
}

// Assign addresses to all inputs, interleaving branch-range-extension thunks
// where call sites cannot reach their targets directly. See the large design
// comment near the top of this file.
void TextOutputSection::finalize() {
  if (!needsThunks()) {
    // Fast path: no thunks needed, plain sequential layout.
    for (ConcatInputSection *isec : inputs)
      finalizeOne(isec);
    return;
  }

  uint64_t forwardBranchRange = target->forwardBranchRange;
  uint64_t backwardBranchRange = target->backwardBranchRange;
  // Sentinel: until all inputs are finalized we cannot know where __stubs
  // lands, so treat stubs as unreachable.
  uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
  size_t thunkSize = target->thunkSize;
  // Statistics for the log() summary at the end.
  size_t relocCount = 0;
  size_t callSiteCount = 0;
  size_t thunkCallCount = 0;
  size_t thunkCount = 0;

  // Walk all sections in order. Finalize all sections that are less than
  // forwardBranchRange in front of it.
  // isecVA is the address of the current section.
  // addr + size is the start address of the first non-finalized section.

  // inputs[finalIdx] is for finalization (address-assignment)
  size_t finalIdx = 0;
  // Kick-off by ensuring that the first input section has an address
  for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
       ++callIdx) {
    if (finalIdx == callIdx)
      finalizeOne(inputs[finalIdx++]);
    ConcatInputSection *isec = inputs[callIdx];
    assert(isec->isFinal);
    uint64_t isecVA = isec->getVA();

    // Assign addresses up-to the forward branch-range limit.
    // Every call instruction needs a small number of bytes (on Arm64: 4),
    // and each inserted thunk needs a slightly larger number of bytes
    // (on Arm64: 12). If a section starts with a branch instruction and
    // contains several branch instructions in succession, then the distance
    // from the current position to the position where the thunks are inserted
    // grows. So leave room for a bunch of thunks.
    unsigned slop = 256 * thunkSize;
    while (finalIdx < endIdx) {
      uint64_t expectedNewSize =
          alignToPowerOf2(addr + size, inputs[finalIdx]->align) +
          inputs[finalIdx]->getSize();
      if (expectedNewSize >= isecVA + forwardBranchRange - slop)
        break;
      finalizeOne(inputs[finalIdx++]);
    }

    // Skip the reloc scan entirely for sections with no BRANCH relocs
    // (flag precomputed in needsThunks()).
    if (!isec->hasCallSites)
      continue;

    if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
      // When we have finalized all input sections, __stubs (destined
      // to follow __text) comes within range of forward branches and
      // we can estimate the threshold address after which we can
      // reach any stub with a forward branch. Note that although it
      // sits in the middle of a loop, this code executes only once.
      // It is in the loop because we need to call it at the proper
      // time: the earliest call site from which the end of __text
      // (and start of __stubs) comes within range of a forward branch.
      stubsInRangeVA = estimateStubsInRangeVA(callIdx);
    }
    // Process relocs by ascending address, i.e., ascending offset within isec
    std::vector<Reloc> &relocs = isec->relocs;
    // FIXME: This property does not hold for object files produced by ld64's
    // `-r` mode.
    assert(is_sorted(relocs,
                     [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
    for (Reloc &r : reverse(relocs)) {
      ++relocCount;
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      ++callSiteCount;
      // Calculate branch reachability boundaries
      uint64_t callVA = isecVA + r.offset;
      uint64_t lowVA =
          backwardBranchRange < callVA ? callVA - backwardBranchRange : 0;
      uint64_t highVA = callVA + forwardBranchRange;
      // Calculate our call referent address
      auto *funcSym = cast<Symbol *>(r.referent);
      ThunkInfo &thunkInfo = thunkMap[funcSym];
      // The referent is not reachable, so we need to use a thunk ...
      if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
        assert(callVA != TargetInfo::outOfRangeVA);
        // ... Oh, wait! We are close enough to the end that __stubs
        // are now within range of a simple forward branch.
        continue;
      }
      uint64_t funcVA = funcSym->resolveBranchVA();
      ++thunkInfo.callSitesUsed;
      if (lowVA <= funcVA && funcVA <= highVA) {
        // The referent is reachable with a simple call instruction.
        continue;
      }
      ++thunkInfo.thunkCallCount;
      ++thunkCallCount;
      // If an existing thunk is reachable, use it ...
      if (thunkInfo.sym) {
        uint64_t thunkVA = thunkInfo.isec->getVA();
        if (lowVA <= thunkVA && thunkVA <= highVA) {
          r.referent = thunkInfo.sym;
          continue;
        }
      }
      // ... otherwise, create a new thunk.
      if (addr + size > highVA) {
        // There were too many consecutive branch instructions for `slop`
        // above. If you hit this: For the current algorithm, just bumping up
        // slop above and trying again is probably simplest. (See also PR51578
        // comment 5).
        fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
      }
      thunkInfo.isec =
          makeSyntheticInputSection(isec->getSegName(), isec->getName());
      thunkInfo.isec->parent = this;
      assert(thunkInfo.isec->live);

      // Thunks are named <FUNCTION>.thunk.<SEQUENCE>; the sequence number
      // distinguishes successive incarnations for the same function.
      StringRef thunkName = saver().save(funcSym->getName() + ".thunk." +
                                         std::to_string(thunkInfo.sequence++));
      if (!isa<Defined>(funcSym) || cast<Defined>(funcSym)->isExternal()) {
        r.referent = thunkInfo.sym = symtab->addDefined(
            thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0, thunkSize,
            /*isWeakDef=*/false, /*isPrivateExtern=*/true,
            /*isReferencedDynamically=*/false, /*noDeadStrip=*/false,
            /*isWeakDefCanBeHidden=*/false);
      } else {
        r.referent = thunkInfo.sym = make<Defined>(
            thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0, thunkSize,
            /*isWeakDef=*/false, /*isExternal=*/false, /*isPrivateExtern=*/true,
            /*includeInSymtab=*/true, /*isReferencedDynamically=*/false,
            /*noDeadStrip=*/false, /*isWeakDefCanBeHidden=*/false);
      }
      thunkInfo.sym->used = true;
      target->populateThunk(thunkInfo.isec, funcSym);
      // The thunk is placed at the current end of the section, which is the
      // farthest-forward address still reachable from the call site.
      finalizeOne(thunkInfo.isec);
      thunks.push_back(thunkInfo.isec);
      ++thunkCount;
    }
  }

  log("thunks for " + parent->name + "," + name +
      ": funcs = " + std::to_string(thunkMap.size()) +
      ", relocs = " + std::to_string(relocCount) +
      ", all calls = " + std::to_string(callSiteCount) +
      ", thunk calls = " + std::to_string(thunkCallCount) +
      ", thunks = " + std::to_string(thunkCount));
}

// Copy every input section's bytes to its assigned offset in the output
// buffer.
void ConcatOutputSection::writeTo(uint8_t *buf) const {
  for (ConcatInputSection *isec : inputs)
    isec->writeTo(buf + isec->outSecOff);
}

void TextOutputSection::writeTo(uint8_t *buf) const {
  // Merge input sections from thunk & ordinary vectors. Both vectors are
  // ordered by ascending outSecOff, so a two-finger merge emits everything
  // in address order.
  size_t i = 0, ie = inputs.size();
  size_t t = 0, te = thunks.size();
  while (i < ie || t < te) {
    while (i < ie && (t == te || inputs[i]->empty() ||
                      inputs[i]->outSecOff < thunks[t]->outSecOff)) {
      inputs[i]->writeTo(buf + inputs[i]->outSecOff);
      ++i;
    }
    while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
      thunks[t]->writeTo(buf + thunks[t]->outSecOff);
      ++t;
    }
  }
}

// NOTE: declaration continues on the next line (return type split from the
// qualified name in the original layout).
void
ConcatOutputSection::finalizeFlags(InputSection *input) { 420 switch (sectionType(input->getFlags())) { 421 default /*type-unspec'ed*/: 422 // FIXME: Add additional logic here when supporting emitting obj files. 423 break; 424 case S_4BYTE_LITERALS: 425 case S_8BYTE_LITERALS: 426 case S_16BYTE_LITERALS: 427 case S_CSTRING_LITERALS: 428 case S_ZEROFILL: 429 case S_LAZY_SYMBOL_POINTERS: 430 case S_MOD_TERM_FUNC_POINTERS: 431 case S_THREAD_LOCAL_REGULAR: 432 case S_THREAD_LOCAL_ZEROFILL: 433 case S_THREAD_LOCAL_VARIABLES: 434 case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS: 435 case S_THREAD_LOCAL_VARIABLE_POINTERS: 436 case S_NON_LAZY_SYMBOL_POINTERS: 437 case S_SYMBOL_STUBS: 438 flags |= input->getFlags(); 439 break; 440 } 441 } 442 443 ConcatOutputSection * 444 ConcatOutputSection::getOrCreateForInput(const InputSection *isec) { 445 NamePair names = maybeRenameSection({isec->getSegName(), isec->getName()}); 446 ConcatOutputSection *&osec = concatOutputSections[names]; 447 if (!osec) { 448 if (isec->getSegName() == segment_names::text && 449 isec->getName() != section_names::gccExceptTab && 450 isec->getName() != section_names::ehFrame) 451 osec = make<TextOutputSection>(names.second); 452 else 453 osec = make<ConcatOutputSection>(names.second); 454 } 455 return osec; 456 } 457 458 NamePair macho::maybeRenameSection(NamePair key) { 459 auto newNames = config->sectionRenameMap.find(key); 460 if (newNames != config->sectionRenameMap.end()) 461 return newNames->second; 462 return key; 463 } 464