//===- SyntheticSections.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SyntheticSections.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "ExportTrie.h"
#include "ICF.h"
#include "InputFiles.h"
#include "MachOStructs.h"
#include "ObjC.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"

#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/xxhash.h"

#if defined(__APPLE__)
#include <sys/mman.h>

#define COMMON_DIGEST_FOR_OPENSSL
#include <CommonCrypto/CommonDigest.h>
#else
#include "llvm/Support/SHA256.h"
#endif

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

// Reads `len` bytes at `data` and writes the 32-byte SHA256 checksum to
// `output`.
static void sha256(const uint8_t *data, size_t len, uint8_t *output) {
#if defined(__APPLE__)
  // FIXME: Make LLVM's SHA256 faster and use it unconditionally. See PR56121
  // for some notes on this.
  CC_SHA256(data, len, output);
#else
  ArrayRef<uint8_t> block(data, len);
  std::array<uint8_t, 32> hash = SHA256::hash(block);
  static_assert(hash.size() == CodeSignatureSection::hashSize);
  memcpy(output, hash.data(), hash.size());
#endif
}

InStruct macho::in;
std::vector<SyntheticSection *> macho::syntheticSections;

SyntheticSection::SyntheticSection(const char *segname, const char *name)
    : OutputSection(SyntheticKind, name) {
  std::tie(this->segname, this->name) = maybeRenameSection({segname, name});
  isec = makeSyntheticInputSection(segname, name);
  isec->parent = this;
  syntheticSections.push_back(this);
}

// dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts
// from the beginning of the file (i.e. the header).
MachHeaderSection::MachHeaderSection()
    : SyntheticSection(segment_names::text, section_names::header) {
  // XXX: This is a hack. (See D97007)
  // We set the index to 1 to pretend that this section is the text section.
  index = 1;
  isec->isFinal = true;
}

void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
  loadCommands.push_back(lc);
  sizeOfCmds += lc->getSize();
}

uint64_t MachHeaderSection::getSize() const {
  uint64_t size = target->headerSize + sizeOfCmds + config->headerPad;
  // If we are emitting an encryptable binary, our load commands must have a
  // separate (non-encrypted) page to themselves.
  if (config->emitEncryptionInfo)
    size = alignToPowerOf2(size, target->getPageSize());
  return size;
}

static uint32_t cpuSubtype() {
  uint32_t subtype = target->cpuSubtype;

  if (config->outputType == MH_EXECUTE && !config->staticLink &&
      target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
      config->platform() == PLATFORM_MACOS &&
      config->platformInfo.target.MinDeployment >= VersionTuple(10, 5))
    subtype |= CPU_SUBTYPE_LIB64;

  return subtype;
}

static bool hasWeakBinding() {
  return config->emitChainedFixups ? in.chainedFixups->hasWeakBinding()
                                   : in.weakBinding->hasEntry();
}

static bool hasNonWeakDefinition() {
  return config->emitChainedFixups ? in.chainedFixups->hasNonWeakDefinition()
                                   : in.weakBinding->hasNonWeakDefinition();
}

void MachHeaderSection::writeTo(uint8_t *buf) const {
  auto *hdr = reinterpret_cast<mach_header *>(buf);
  hdr->magic = target->magic;
  hdr->cputype = target->cpuType;
  hdr->cpusubtype = cpuSubtype();
  hdr->filetype = config->outputType;
  hdr->ncmds = loadCommands.size();
  hdr->sizeofcmds = sizeOfCmds;
  hdr->flags = MH_DYLDLINK;

  if (config->namespaceKind == NamespaceKind::twolevel)
    hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL;

  if (config->outputType == MH_DYLIB && !config->hasReexports)
    hdr->flags |= MH_NO_REEXPORTED_DYLIBS;

  if (config->markDeadStrippableDylib)
    hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB;

  if (config->outputType == MH_EXECUTE && config->isPic)
    hdr->flags |= MH_PIE;

  if (config->outputType == MH_DYLIB && config->applicationExtension)
    hdr->flags |= MH_APP_EXTENSION_SAFE;

  if (in.exports->hasWeakSymbol || hasNonWeakDefinition())
    hdr->flags |= MH_WEAK_DEFINES;

  if (in.exports->hasWeakSymbol || hasWeakBinding())
    hdr->flags |= MH_BINDS_TO_WEAK;

  for (const OutputSegment *seg : outputSegments) {
    for (const OutputSection *osec : seg->getSections()) {
      if (isThreadLocalVariables(osec->flags)) {
        hdr->flags |= MH_HAS_TLV_DESCRIPTORS;
        break;
      }
    }
  }

  uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize;
  for (const LoadCommand *lc : loadCommands) {
    lc->writeTo(p);
    p += lc->getSize();
  }
}

PageZeroSection::PageZeroSection()
    : SyntheticSection(segment_names::pageZero, section_names::pageZero) {}

RebaseSection::RebaseSection()
    : LinkEditSection(segment_names::linkEdit, section_names::rebase) {}

namespace {
struct RebaseState {
  uint64_t sequenceLength;
  uint64_t skipLength;
};
} // namespace

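// Emit an opcode advancing dyld's current rebase address by `incr` bytes,
// using the compact single-byte scaled-immediate form when possible.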
static void emitIncrement(uint64_t incr, raw_svector_ostream &os) {
  assert(incr != 0);

  if ((incr >> target->p2WordSize) <= REBASE_IMMEDIATE_MASK &&
      (incr % target->wordSize) == 0) {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_IMM_SCALED |
                               (incr >> target->p2WordSize));
  } else {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_ULEB);
    encodeULEB128(incr, os);
  }
}

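// Emit the most compact DO_REBASE opcode for the run accumulated in `state`:
// `sequenceLength` rebases spaced `skipLength` bytes apart.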
static void flushRebase(const RebaseState &state, raw_svector_ostream &os) {
  assert(state.sequenceLength > 0);

  if (state.skipLength == target->wordSize) {
    if (state.sequenceLength <= REBASE_IMMEDIATE_MASK) {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES |
                                 state.sequenceLength);
    } else {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
      encodeULEB128(state.sequenceLength, os);
    }
  } else if (state.sequenceLength == 1) {
    os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
    encodeULEB128(state.skipLength - target->wordSize, os);
  } else {
    os << static_cast<uint8_t>(
        REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
    encodeULEB128(state.sequenceLength, os);
    encodeULEB128(state.skipLength - target->wordSize, os);
  }
}

// Rebases are communicated to dyld using a bytecode, whose opcodes cause the
// memory location at a specific address to be rebased and/or the address to be
// incremented.
//
// Opcode REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB is the most generic
// one, encoding a series of evenly spaced addresses. This algorithm works by
// splitting up the sorted list of addresses into such chunks. If the locations
// are consecutive or the sequence consists of a single location, flushRebase
// will use a smaller, more specialized encoding.
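//
// For example (a sketch; assuming 8-byte words and illustrative offsets),
// rebasing the segment offsets {0x10, 0x18, 0x20, 0x40} would encode as:
//   REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIdx, ULEB(0x10)
//   REBASE_OPCODE_DO_REBASE_IMM_TIMES | 3   // rebases 0x10, 0x18, 0x20
//   REBASE_OPCODE_ADD_ADDR_IMM_SCALED | 3   // advances by 3 * wordSize
//   REBASE_OPCODE_DO_REBASE_IMM_TIMES | 1   // rebases 0x40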
static void encodeRebases(const OutputSegment *seg,
                          MutableArrayRef<Location> locations,
                          raw_svector_ostream &os) {
  // dyld operates on segments. Translate section offsets into segment offsets.
  for (Location &loc : locations)
    loc.offset =
        loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);
  // The algorithm assumes that locations are unique.
  Location *end =
      llvm::unique(locations, [](const Location &a, const Location &b) {
        return a.offset == b.offset;
      });
  size_t count = end - locations.begin();

  os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             seg->index);
  assert(!locations.empty());
  uint64_t offset = locations[0].offset;
  encodeULEB128(offset, os);

  RebaseState state{1, target->wordSize};

  for (size_t i = 1; i < count; ++i) {
    offset = locations[i].offset;

    uint64_t skip = offset - locations[i - 1].offset;
    assert(skip != 0 && "duplicate locations should have been weeded out");

    if (skip == state.skipLength) {
      ++state.sequenceLength;
    } else if (state.sequenceLength == 1) {
      ++state.sequenceLength;
      state.skipLength = skip;
    } else if (skip < state.skipLength) {
      // The address is lower than where the rebase pointer would be if the
      // last location were part of the sequence. Start a new sequence from
      // the previous location.
      --state.sequenceLength;
      flushRebase(state, os);

      state.sequenceLength = 2;
      state.skipLength = skip;
    } else {
      // The address is at some positive offset from the rebase pointer. We
      // start a new sequence which begins with the current location.
      flushRebase(state, os);
      emitIncrement(skip - state.skipLength, os);
      state.sequenceLength = 1;
      state.skipLength = target->wordSize;
    }
  }
  flushRebase(state, os);
}

void RebaseSection::finalizeContents() {
  if (locations.empty())
    return;

  raw_svector_ostream os{contents};
  os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER);

  llvm::sort(locations, [](const Location &a, const Location &b) {
    return a.isec->getVA(a.offset) < b.isec->getVA(b.offset);
  });

  for (size_t i = 0, count = locations.size(); i < count;) {
    const OutputSegment *seg = locations[i].isec->parent->parent;
    size_t j = i + 1;
    while (j < count && locations[j].isec->parent->parent == seg)
      ++j;
    encodeRebases(seg, {locations.data() + i, locations.data() + j}, os);
    i = j;
  }
  os << static_cast<uint8_t>(REBASE_OPCODE_DONE);
}

void RebaseSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname,
                                                     const char *name)
    : SyntheticSection(segname, name) {
  align = target->wordSize;
}

void macho::addNonLazyBindingEntries(const Symbol *sym,
                                     const InputSection *isec, uint64_t offset,
                                     int64_t addend) {
  if (config->emitChainedFixups) {
    if (needsBinding(sym))
      in.chainedFixups->addBinding(sym, isec, offset, addend);
    else if (isa<Defined>(sym))
      in.chainedFixups->addRebase(isec, offset);
    else
      llvm_unreachable("cannot bind to an undefined symbol");
    return;
  }

  if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    in.binding->addEntry(dysym, isec, offset, addend);
    if (dysym->isWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
  } else if (const auto *defined = dyn_cast<Defined>(sym)) {
    in.rebase->addEntry(isec, offset);
    if (defined->isExternalWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
    else if (defined->interposable)
      in.binding->addEntry(sym, isec, offset, addend);
  } else {
    // Undefined symbols are filtered out in scanRelocations(); we should never
    // get here.
    llvm_unreachable("cannot bind to an undefined symbol");
  }
}

void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
  if (entries.insert(sym)) {
    assert(!sym->isInGot());
    sym->gotIndex = entries.size() - 1;

    addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize);
  }
}

void macho::writeChainedRebase(uint8_t *buf, uint64_t targetVA) {
  assert(config->emitChainedFixups);
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  auto *rebase = reinterpret_cast<dyld_chained_ptr_64_rebase *>(buf);
  rebase->target = targetVA & 0xf'ffff'ffff;
  rebase->high8 = (targetVA >> 56);
  rebase->reserved = 0;
  rebase->next = 0;
  rebase->bind = 0;

  // The fixup format places a 64 GiB limit on the output's size.
  // Should we handle this gracefully?
  uint64_t encodedVA = rebase->target | ((uint64_t)rebase->high8 << 56);
  if (encodedVA != targetVA)
    error("rebase target address 0x" + Twine::utohexstr(targetVA) +
          " does not fit into chained fixup. Re-link with -no_fixup_chains");
}

static void writeChainedBind(uint8_t *buf, const Symbol *sym, int64_t addend) {
  assert(config->emitChainedFixups);
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  auto *bind = reinterpret_cast<dyld_chained_ptr_64_bind *>(buf);
  auto [ordinal, inlineAddend] = in.chainedFixups->getBinding(sym, addend);
  bind->ordinal = ordinal;
  bind->addend = inlineAddend;
  bind->reserved = 0;
  bind->next = 0;
  bind->bind = 1;
}

void macho::writeChainedFixup(uint8_t *buf, const Symbol *sym, int64_t addend) {
  if (needsBinding(sym))
    writeChainedBind(buf, sym, addend);
  else
    writeChainedRebase(buf, sym->getVA() + addend);
}

void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const {
  if (config->emitChainedFixups) {
    for (const auto &[i, entry] : llvm::enumerate(entries))
      writeChainedFixup(&buf[i * target->wordSize], entry, 0);
  } else {
    for (const auto &[i, entry] : llvm::enumerate(entries))
      if (auto *defined = dyn_cast<Defined>(entry))
        write64le(&buf[i * target->wordSize], defined->getVA());
  }
}

GotSection::GotSection()
    : NonLazyPointerSectionBase(segment_names::data, section_names::got) {
  flags = S_NON_LAZY_SYMBOL_POINTERS;
}

TlvPointerSection::TlvPointerSection()
    : NonLazyPointerSectionBase(segment_names::data,
                                section_names::threadPtrs) {
  flags = S_THREAD_LOCAL_VARIABLE_POINTERS;
}

BindingSection::BindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::binding) {}

namespace {
struct Binding {
  OutputSegment *segment = nullptr;
  uint64_t offset = 0;
  int64_t addend = 0;
};
struct BindIR {
  // The default value of 0xF0 is not a valid opcode; it should make the
  // program scream instead of accidentally writing "valid" values.
  uint8_t opcode = 0xF0;
  uint64_t data = 0;
  uint64_t consecutiveCount = 0;
};
} // namespace

// Encode a sequence of opcodes that tell dyld to write the address of symbol +
// addend at osec->addr + outSecOff.
//
// The bind opcode "interpreter" remembers the values of each binding field, so
// we only need to encode the differences between bindings. Hence the use of
// lastBinding.
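//
// For example (a sketch; assuming 8-byte words), two bindings in the same
// segment at segment offsets 0x1000 and 0x1010 would encode as:
//   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIdx, ULEB(0x1000)
//   BIND_OPCODE_DO_BIND                  // dyld advances to 0x1008
//   BIND_OPCODE_ADD_ADDR_ULEB, ULEB(8)   // skip ahead to 0x1010
//   BIND_OPCODE_DO_BIND
// (optimizeOpcodes() may later fuse the DO_BIND/ADD_ADDR pair.)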
static void encodeBinding(const OutputSection *osec, uint64_t outSecOff,
                          int64_t addend, Binding &lastBinding,
                          std::vector<BindIR> &opcodes) {
  OutputSegment *seg = osec->parent;
  uint64_t offset = osec->getSegmentOffset() + outSecOff;
  if (lastBinding.segment != seg) {
    opcodes.push_back(
        {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                              seg->index),
         offset});
    lastBinding.segment = seg;
    lastBinding.offset = offset;
  } else if (lastBinding.offset != offset) {
    opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset});
    lastBinding.offset = offset;
  }

  if (lastBinding.addend != addend) {
    opcodes.push_back(
        {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)});
    lastBinding.addend = addend;
  }

  opcodes.push_back({BIND_OPCODE_DO_BIND, 0});
  // DO_BIND causes dyld to both perform the binding and increment the offset.
  lastBinding.offset += target->wordSize;
}

static void optimizeOpcodes(std::vector<BindIR> &opcodes) {
  // Pass 1: Combine bind/add pairs
  size_t i;
  int pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 2: Compress two or more bind_add opcodes
  pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i].data == opcodes[i - 1].data)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
      opcodes[pWrite].consecutiveCount = 2;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
      while (i < opcodes.size() &&
             (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
             (opcodes[i].data == opcodes[i - 1].data)) {
        opcodes[pWrite].consecutiveCount++;
        ++i;
      }
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 3: Use immediate encodings
  // Every binding is the size of one pointer. If the offset to the next
  // binding is a multiple of wordSize and, once divided by wordSize, fits
  // within BIND_IMMEDIATE_MASK, the offset can be folded into a single-byte
  // opcode that dyld scales back up by wordSize to the correct address.
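  //
  // For example (illustrative; wordSize == 8): DO_BIND_ADD_ADDR_ULEB with a
  // ULEB payload of 0x10 becomes DO_BIND_ADD_ADDR_IMM_SCALED | 2.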
  for (auto &p : opcodes) {
    // It's unclear why the check is strictly less than BIND_IMMEDIATE_MASK
    // rather than <=, but ld64 currently does this. It may be a bug; for
    // now, mimic the same behavior to avoid mysterious mismatches.
    if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) &&
        ((p.data % target->wordSize) == 0)) {
      p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
      p.data /= target->wordSize;
    }
  }
}

static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) {
  uint8_t opcode = op.opcode & BIND_OPCODE_MASK;
  switch (opcode) {
  case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
  case BIND_OPCODE_ADD_ADDR_ULEB:
  case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
    os << op.opcode;
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_SET_ADDEND_SLEB:
    os << op.opcode;
    encodeSLEB128(static_cast<int64_t>(op.data), os);
    break;
  case BIND_OPCODE_DO_BIND:
    os << op.opcode;
    break;
  case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
    os << op.opcode;
    encodeULEB128(op.consecutiveCount, os);
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
    os << static_cast<uint8_t>(op.opcode | op.data);
    break;
  default:
    llvm_unreachable("unexpected bind opcode");
  }
}

static bool needsWeakBind(const Symbol &sym) {
  if (auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return dysym->isWeakDef();
  if (auto *defined = dyn_cast<Defined>(&sym))
    return defined->isExternalWeakDef();
  return false;
}

// Non-weak bindings need to have their dylib ordinal encoded as well.
static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) {
  if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup())
    return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP);
  assert(dysym.getFile()->isReferenced());
  return dysym.getFile()->ordinal;
}

static int16_t ordinalForSymbol(const Symbol &sym) {
  if (config->emitChainedFixups && needsWeakBind(sym))
    return BIND_SPECIAL_DYLIB_WEAK_LOOKUP;
  if (const auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return ordinalForDylibSymbol(*dysym);
  assert(cast<Defined>(&sym)->interposable);
  return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
}

static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) {
  if (ordinal <= 0) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
                               (ordinal & BIND_IMMEDIATE_MASK));
  } else if (ordinal <= BIND_IMMEDIATE_MASK) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
  } else {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
    encodeULEB128(ordinal, os);
  }
}

static void encodeWeakOverride(const Defined *defined,
                               raw_svector_ostream &os) {
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM |
                             BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION)
     << defined->getName() << '\0';
}

// Organize the bindings so we can encode them with fewer opcodes.
//
// First, all bindings for a given symbol should be grouped together.
// BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it
// has an associated symbol string), so we only want to emit it once per symbol.
//
// Within each group, we sort the bindings by address. Since bindings are
// delta-encoded, sorting them allows for a more compact result. Note that
// sorting by address alone ensures that bindings for the same segment / section
// are located together, minimizing the number of times we have to emit
// BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB.
//
// Finally, we sort the symbols by the address of their first binding, again
// to facilitate the delta-encoding process.
template <class Sym>
std::vector<std::pair<const Sym *, std::vector<BindingEntry>>>
sortBindings(const BindingsMap<const Sym *> &bindingsMap) {
  std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec(
      bindingsMap.begin(), bindingsMap.end());
  for (auto &p : bindingsVec) {
    std::vector<BindingEntry> &bindings = p.second;
    llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
      return a.target.getVA() < b.target.getVA();
    });
  }
  llvm::sort(bindingsVec, [](const auto &a, const auto &b) {
    return a.second[0].target.getVA() < b.second[0].target.getVA();
  });
  return bindingsVec;
}

// Emit bind opcodes, which are a stream of byte-sized opcodes that dyld
// interprets to update a record with the following fields:
//  * segment index (of the segment to write the symbol addresses to, typically
//    the __DATA_CONST segment which contains the GOT)
//  * offset within the segment, indicating the next location to write a binding
//  * symbol type
//  * symbol library ordinal (the index of its library's LC_LOAD_DYLIB command)
//  * symbol name
//  * addend
// When dyld sees BIND_OPCODE_DO_BIND, it uses the current record state to bind
// a symbol in the GOT, and increments the segment offset to point to the next
// entry. It does *not* clear the record state after doing the bind, so
// subsequent opcodes only need to encode the differences between bindings.
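//
// A minimal stream for one pointer binding might look like this (a sketch;
// "_foo" and the ordinal are illustrative):
//   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_foo\0"
//   BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER
//   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 1
//   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIdx, ULEB(offset)
//   BIND_OPCODE_DO_BIND
//   BIND_OPCODE_DONE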
void BindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;
  int16_t lastOrdinal = 0;

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
    if (sym->isWeakRef())
      flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
    os << flags << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    int16_t ordinal = ordinalForSymbol(*sym);
    if (ordinal != lastOrdinal) {
      encodeDylibOrdinal(ordinal, os);
      lastOrdinal = ordinal;
    }
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void BindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

WeakBindingSection::WeakBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::weakBinding) {}

void WeakBindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;

  for (const Defined *defined : definitions)
    encodeWeakOverride(defined, os);

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
       << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty() || !definitions.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void WeakBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

StubsSection::StubsSection()
    : SyntheticSection(segment_names::text, section_names::stubs) {
  flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  // The stubs section comprises machine instructions, which are aligned to
  // 4 bytes on the archs we care about.
  align = 4;
  reserved2 = target->stubSize;
}

uint64_t StubsSection::getSize() const {
  return entries.size() * target->stubSize;
}

void StubsSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : entries) {
    uint64_t pointerVA =
        config->emitChainedFixups ? sym->getGotVA() : sym->getLazyPtrVA();
    target->writeStub(buf + off, *sym, pointerVA);
    off += target->stubSize;
  }
}

void StubsSection::finalize() { isFinal = true; }

static void addBindingsForStub(Symbol *sym) {
  assert(!config->emitChainedFixups);
  if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    if (sym->isWeakDef()) {
      in.binding->addEntry(dysym, in.lazyPointers->isec,
                           sym->stubsIndex * target->wordSize);
      in.weakBinding->addEntry(sym, in.lazyPointers->isec,
                               sym->stubsIndex * target->wordSize);
    } else {
      in.lazyBinding->addEntry(dysym);
    }
  } else if (auto *defined = dyn_cast<Defined>(sym)) {
    if (defined->isExternalWeakDef()) {
      in.rebase->addEntry(in.lazyPointers->isec,
                          sym->stubsIndex * target->wordSize);
      in.weakBinding->addEntry(sym, in.lazyPointers->isec,
                               sym->stubsIndex * target->wordSize);
    } else if (defined->interposable) {
      in.lazyBinding->addEntry(sym);
    } else {
      llvm_unreachable("invalid stub target");
    }
  } else {
    llvm_unreachable("invalid stub target symbol type");
  }
}

void StubsSection::addEntry(Symbol *sym) {
  bool inserted = entries.insert(sym);
  if (inserted) {
    sym->stubsIndex = entries.size() - 1;

    if (config->emitChainedFixups)
      in.got->addEntry(sym);
    else
      addBindingsForStub(sym);
  }
}

StubHelperSection::StubHelperSection()
    : SyntheticSection(segment_names::text, section_names::stubHelper) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = 4; // This section comprises machine instructions
}

uint64_t StubHelperSection::getSize() const {
  return target->stubHelperHeaderSize +
         in.lazyBinding->getEntries().size() * target->stubHelperEntrySize;
}

bool StubHelperSection::isNeeded() const { return in.lazyBinding->isNeeded(); }

void StubHelperSection::writeTo(uint8_t *buf) const {
  target->writeStubHelperHeader(buf);
  size_t off = target->stubHelperHeaderSize;
  for (const Symbol *sym : in.lazyBinding->getEntries()) {
    target->writeStubHelperEntry(buf + off, *sym, addr + off);
    off += target->stubHelperEntrySize;
  }
}

void StubHelperSection::setUp() {
  Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr,
                                        /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(binder))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libSystem.dylib)");

  // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check.
  stubBinder = dyn_cast_or_null<DylibSymbol>(binder);
  if (stubBinder == nullptr)
    return;

  in.got->addEntry(stubBinder);

  in.imageLoaderCache->parent =
      ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
  addInputSection(in.imageLoaderCache);
  // Since this isn't in the symbol table or in any input file, the noDeadStrip
  // argument doesn't matter.
  dyldPrivate =
      make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0,
                    /*isWeakDef=*/false,
                    /*isExternal=*/false, /*isPrivateExtern=*/false,
                    /*includeInSymtab=*/true,
                    /*isReferencedDynamically=*/false,
                    /*noDeadStrip=*/false);
  dyldPrivate->used = true;
}

llvm::DenseMap<llvm::CachedHashStringRef, ConcatInputSection *>
    ObjCSelRefsHelper::methnameToSelref;
void ObjCSelRefsHelper::initialize() {
  // Do not fold selrefs without ICF.
  if (config->icfLevel == ICFLevel::none)
    return;

  // Search for methnames already referenced in __objc_selrefs and map each
  // one to the corresponding selref entry, which we will reuse when creating
  // objc stubs.
  for (ConcatInputSection *isec : inputSections) {
    if (isec->shouldOmitFromOutput())
      continue;
    if (isec->getName() != section_names::objcSelrefs)
      continue;
    // We expect exactly one relocation per selref entry, pointing to the
    // (possibly deduplicated) methname string in __objc_methname.
    assert(isec->relocs.size() == 1);
    auto reloc = isec->relocs[0];
    if (const auto *sym = reloc.referent.dyn_cast<Symbol *>()) {
      if (const auto *d = dyn_cast<Defined>(sym)) {
        auto *cisec = cast<CStringInputSection>(d->isec());
        auto methname = cisec->getStringRefAtOffset(d->value);
        methnameToSelref[CachedHashStringRef(methname)] = isec;
      }
    }
  }
}

void ObjCSelRefsHelper::cleanup() { methnameToSelref.clear(); }

ConcatInputSection *ObjCSelRefsHelper::makeSelRef(StringRef methname) {
  auto methnameOffset =
      in.objcMethnameSection->getStringOffset(methname).outSecOff;

  size_t wordSize = target->wordSize;
  uint8_t *selrefData = bAlloc().Allocate<uint8_t>(wordSize);
  write64le(selrefData, methnameOffset);
  ConcatInputSection *objcSelref =
      makeSyntheticInputSection(segment_names::data, section_names::objcSelrefs,
                                S_LITERAL_POINTERS | S_ATTR_NO_DEAD_STRIP,
                                ArrayRef<uint8_t>{selrefData, wordSize},
                                /*align=*/wordSize);
  assert(objcSelref->live);
  objcSelref->relocs.push_back({/*type=*/target->unsignedRelocType,
                                /*pcrel=*/false, /*length=*/3,
                                /*offset=*/0,
                                /*addend=*/static_cast<int64_t>(methnameOffset),
                                /*referent=*/in.objcMethnameSection->isec});
  objcSelref->parent = ConcatOutputSection::getOrCreateForInput(objcSelref);
  addInputSection(objcSelref);
  objcSelref->isFinal = true;
  methnameToSelref[CachedHashStringRef(methname)] = objcSelref;
  return objcSelref;
}

ConcatInputSection *ObjCSelRefsHelper::getSelRef(StringRef methname) {
  auto it = methnameToSelref.find(CachedHashStringRef(methname));
  if (it == methnameToSelref.end())
    return nullptr;
  return it->second;
}

ObjCStubsSection::ObjCStubsSection()
    : SyntheticSection(segment_names::text, section_names::objcStubs) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = config->objcStubsMode == ObjCStubsMode::fast
              ? target->objcStubsFastAlignment
              : target->objcStubsSmallAlignment;
}

bool ObjCStubsSection::isObjCStubSymbol(Symbol *sym) {
  return sym->getName().starts_with(symbolPrefix);
}

StringRef ObjCStubsSection::getMethname(Symbol *sym) {
  assert(isObjCStubSymbol(sym) && "not an objc stub");
  auto name = sym->getName();
  StringRef methname = name.drop_front(symbolPrefix.size());
  return methname;
}

void ObjCStubsSection::addEntry(Symbol *sym) {
  StringRef methname = getMethname(sym);
  // We create a selref entry for each unique methname.
  if (!ObjCSelRefsHelper::getSelRef(methname))
    ObjCSelRefsHelper::makeSelRef(methname);

  auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
                      ? target->objcStubsFastSize
                      : target->objcStubsSmallSize;
  Defined *newSym = replaceSymbol<Defined>(
      sym, sym->getName(), nullptr, isec,
      /*value=*/symbols.size() * stubSize,
      /*size=*/stubSize,
      /*isWeakDef=*/false, /*isExternal=*/true, /*isPrivateExtern=*/true,
      /*includeInSymtab=*/true, /*isReferencedDynamically=*/false,
      /*noDeadStrip=*/false);
  symbols.push_back(newSym);
}

void ObjCStubsSection::setUp() {
  objcMsgSend = symtab->addUndefined("_objc_msgSend", /*file=*/nullptr,
                                     /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(objcMsgSend))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libobjc.dylib)");
  objcMsgSend->used = true;
  if (config->objcStubsMode == ObjCStubsMode::fast) {
    in.got->addEntry(objcMsgSend);
    assert(objcMsgSend->isInGot());
  } else {
    assert(config->objcStubsMode == ObjCStubsMode::small);
    // In line with ld64's behavior, when objc_msgSend is a defined symbol,
    // we reference it directly. Otherwise (typically when it is bound at
    // runtime from libobjc.dylib), we generate a stub to invoke it.
    if (!isa<Defined>(objcMsgSend))
      in.stubs->addEntry(objcMsgSend);
  }
}

uint64_t ObjCStubsSection::getSize() const {
  auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
                      ? target->objcStubsFastSize
                      : target->objcStubsSmallSize;
  return stubSize * symbols.size();
}

void ObjCStubsSection::writeTo(uint8_t *buf) const {
  uint64_t stubOffset = 0;
  for (size_t i = 0, n = symbols.size(); i < n; ++i) {
    Defined *sym = symbols[i];

    auto methname = getMethname(sym);
    InputSection *selRef = ObjCSelRefsHelper::getSelRef(methname);
    assert(selRef != nullptr && "no selref for methname");
    auto selrefAddr = selRef->getVA(0);
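    // writeObjCMsgSendStub advances stubOffset to the next stub's offset.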
    target->writeObjCMsgSendStub(buf + stubOffset, sym, in.objcStubs->addr,
                                 stubOffset, selrefAddr, objcMsgSend);
  }
}

LazyPointerSection::LazyPointerSection()
    : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) {
  align = target->wordSize;
  flags = S_LAZY_SYMBOL_POINTERS;
}

uint64_t LazyPointerSection::getSize() const {
  return in.stubs->getEntries().size() * target->wordSize;
}

bool LazyPointerSection::isNeeded() const {
  return !in.stubs->getEntries().empty();
}

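// Lazy pointers for symbols that will be bound lazily initially hold the
// address of their stub helper entry; dyld overwrites each one with the real
// address the first time its stub is called. The remaining entries are either
// filled in eagerly here (defined symbols) or left to dyld's non-lazy weak
// binding.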
void LazyPointerSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : in.stubs->getEntries()) {
    if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->hasStubsHelper()) {
        uint64_t stubHelperOffset =
            target->stubHelperHeaderSize +
            dysym->stubsHelperIndex * target->stubHelperEntrySize;
        write64le(buf + off, in.stubHelper->addr + stubHelperOffset);
      }
    } else {
      write64le(buf + off, sym->getVA());
    }
    off += target->wordSize;
  }
}

LazyBindingSection::LazyBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::lazyBinding) {}

void LazyBindingSection::finalizeContents() {
  // TODO: Just precompute output size here instead of writing to a temporary
  // buffer
  for (Symbol *sym : entries)
    sym->lazyBindOffset = encode(*sym);
}

void LazyBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

void LazyBindingSection::addEntry(Symbol *sym) {
  assert(!config->emitChainedFixups && "Chained fixups always bind eagerly");
  if (entries.insert(sym)) {
    sym->stubsHelperIndex = entries.size() - 1;
    in.rebase->addEntry(in.lazyPointers->isec,
                        sym->stubsIndex * target->wordSize);
  }
}

// Unlike the non-lazy binding section, the bind opcodes in this section aren't
// interpreted all at once. Rather, dyld will start interpreting opcodes at a
// given offset, typically only binding a single symbol before it finds a
// BIND_OPCODE_DONE terminator. As such, unlike in the non-lazy-binding case,
// we cannot encode just the differences between symbols; we have to emit the
// complete bind information for each symbol.
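//
// The self-contained stream for one symbol thus looks like (a sketch; the
// name and offsets are illustrative):
//   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | dataSegIdx, ULEB(lazyPtrOffset)
//   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 1
//   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_foo\0"
//   BIND_OPCODE_DO_BIND
//   BIND_OPCODE_DONE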
uint32_t LazyBindingSection::encode(const Symbol &sym) {
  uint32_t opstreamOffset = contents.size();
  OutputSegment *dataSeg = in.lazyPointers->parent;
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             dataSeg->index);
  uint64_t offset =
      in.lazyPointers->addr - dataSeg->addr + sym.stubsIndex * target->wordSize;
  encodeULEB128(offset, os);
  encodeDylibOrdinal(ordinalForSymbol(sym), os);

  uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
  if (sym.isWeakRef())
    flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;

  os << flags << sym.getName() << '\0'
     << static_cast<uint8_t>(BIND_OPCODE_DO_BIND)
     << static_cast<uint8_t>(BIND_OPCODE_DONE);
  return opstreamOffset;
}

ExportSection::ExportSection()
    : LinkEditSection(segment_names::linkEdit, section_names::export_) {}

void ExportSection::finalizeContents() {
  trieBuilder.setImageBase(in.header->addr);
  for (const Symbol *sym : symtab->getSymbols()) {
    if (const auto *defined = dyn_cast<Defined>(sym)) {
      if (defined->privateExtern || !defined->isLive())
        continue;
      trieBuilder.addSymbol(*defined);
      hasWeakSymbol = hasWeakSymbol || sym->isWeakDef();
    } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->shouldReexport)
        trieBuilder.addSymbol(*dysym);
    }
  }
  size = trieBuilder.build();
}

void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); }

DataInCodeSection::DataInCodeSection()
    : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {}

template <class LP>
static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() {
  std::vector<MachO::data_in_code_entry> dataInCodeEntries;
  for (const InputFile *inputFile : inputFiles) {
    if (!isa<ObjFile>(inputFile))
      continue;
    const ObjFile *objFile = cast<ObjFile>(inputFile);
    ArrayRef<MachO::data_in_code_entry> entries = objFile->getDataInCode();
    if (entries.empty())
      continue;

    std::vector<MachO::data_in_code_entry> sortedEntries;
    sortedEntries.assign(entries.begin(), entries.end());
    llvm::sort(sortedEntries, [](const data_in_code_entry &lhs,
                                 const data_in_code_entry &rhs) {
      return lhs.offset < rhs.offset;
    });

    // For each code subsection, find 'data in code' entries residing in it.
    // Compute the new offset values as
    // <offset within subsection> + <subsection address> - <__TEXT address>.
    for (const Section *section : objFile->sections) {
      for (const Subsection &subsec : section->subsections) {
        const InputSection *isec = subsec.isec;
        if (!isCodeSection(isec))
          continue;
        if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput())
          continue;
        const uint64_t beginAddr = section->addr + subsec.offset;
        auto it = llvm::lower_bound(
            sortedEntries, beginAddr,
            [](const MachO::data_in_code_entry &entry, uint64_t addr) {
              return entry.offset < addr;
            });
        const uint64_t endAddr = beginAddr + isec->getSize();
        for (const auto end = sortedEntries.end();
             it != end && it->offset + it->length <= endAddr; ++it)
          dataInCodeEntries.push_back(
              {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) -
                                     in.header->addr),
               it->length, it->kind});
      }
    }
  }

  // ld64 emits the table in sorted order too.
  llvm::sort(dataInCodeEntries,
             [](const data_in_code_entry &lhs, const data_in_code_entry &rhs) {
               return lhs.offset < rhs.offset;
             });
  return dataInCodeEntries;
}

void DataInCodeSection::finalizeContents() {
  entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>()
                                  : collectDataInCodeEntries<ILP32>();
}

void DataInCodeSection::writeTo(uint8_t *buf) const {
  if (!entries.empty())
    memcpy(buf, entries.data(), getRawSize());
}

FunctionStartsSection::FunctionStartsSection()
    : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {}

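// The function starts section holds a sequence of ULEB128-encoded deltas
// between the addresses of consecutive functions, starting from the image
// base and terminated by a zero byte.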
void FunctionStartsSection::finalizeContents() {
  raw_svector_ostream os{contents};
  std::vector<uint64_t> addrs;
  for (const InputFile *file : inputFiles) {
    if (auto *objFile = dyn_cast<ObjFile>(file)) {
      for (const Symbol *sym : objFile->symbols) {
        if (const auto *defined = dyn_cast_or_null<Defined>(sym)) {
          if (!defined->isec() || !isCodeSection(defined->isec()) ||
              !defined->isLive())
            continue;
          addrs.push_back(defined->getVA());
        }
      }
    }
  }
  llvm::sort(addrs);
  uint64_t addr = in.header->addr;
  for (uint64_t nextAddr : addrs) {
    uint64_t delta = nextAddr - addr;
    if (delta == 0)
      continue;
    encodeULEB128(delta, os);
    addr = nextAddr;
  }
  os << '\0';
}

void FunctionStartsSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

SymtabSection::SymtabSection(StringTableSection &stringTableSection)
    : LinkEditSection(segment_names::linkEdit, section_names::symbolTable),
      stringTableSection(stringTableSection) {}

void SymtabSection::emitBeginSourceStab(StringRef sourceFile) {
  StabsEntry stab(N_SO);
  stab.strx = stringTableSection.addString(saver().save(sourceFile));
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndSourceStab() {
  StabsEntry stab(N_SO);
  stab.sect = 1;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitObjectFileStab(ObjFile *file) {
  StabsEntry stab(N_OSO);
  stab.sect = target->cpuSubtype;
  SmallString<261> path(!file->archiveName.empty() ? file->archiveName
                                                   : file->getName());
  std::error_code ec = sys::fs::make_absolute(path);
  if (ec)
    fatal("failed to get absolute path for " + path);

  if (!file->archiveName.empty())
    path.append({"(", file->getName(), ")"});

  StringRef adjustedPath = saver().save(path.str());
  adjustedPath.consume_front(config->osoPrefix);

  stab.strx = stringTableSection.addString(adjustedPath);
  stab.desc = 1;
  stab.value = file->modTime;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndFunStab(Defined *defined) {
  StabsEntry stab(N_FUN);
  stab.value = defined->size;
  stabs.emplace_back(std::move(stab));
}

// Given a pointer to a function symbol, return the symbol that points to the
// actual function body that will go in the final binary. Generally this is the
// symbol itself, but if the symbol was folded using a thunk, we retrieve the
// target function body from the thunk.
Defined *SymtabSection::getFuncBodySym(Defined *originalSym) {
  if (originalSym->identicalCodeFoldingKind == Symbol::ICFFoldKind::None ||
      originalSym->identicalCodeFoldingKind == Symbol::ICFFoldKind::Body)
    return originalSym;

  return macho::getBodyForThunkFoldedSym(originalSym);
}

void SymtabSection::emitStabs() {
  if (config->omitDebugInfo)
    return;

  for (const std::string &s : config->astPaths) {
    StabsEntry astStab(N_AST);
    astStab.strx = stringTableSection.addString(s);
    stabs.emplace_back(std::move(astStab));
  }

  // Cache the file ID for each symbol in a std::pair for faster sorting.
  using SortingPair = std::pair<Defined *, int>;
  std::vector<SortingPair> symbolsNeedingStabs;
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols)) {
    Symbol *sym = entry.sym;
    assert(sym->isLive() &&
           "dead symbols should not be in localSymbols, externalSymbols");
    if (auto *defined = dyn_cast<Defined>(sym)) {
      // Excluded symbols should have been filtered out in finalizeContents().
      assert(defined->includeInSymtab);

      if (defined->isAbsolute())
        continue;

      // Constant-folded symbols go in the executable's symbol table, but
      // don't get a stabs entry unless the --keep-icf-stabs flag is specified.
      if (!config->keepICFStabs &&
          defined->identicalCodeFoldingKind != Symbol::ICFFoldKind::None)
        continue;

      ObjFile *file = defined->getObjectFile();
      if (!file || !file->compileUnit)
        continue;

      // We use 'originalIsec' to get the file ID of the symbol since 'isec()'
      // might point to the merged ICF symbol's file.
1257       symbolsNeedingStabs.emplace_back(
1258           defined, getFuncBodySym(defined)->originalIsec->getFile()->id);
1259     }
1260   }
1261 
1262   llvm::stable_sort(symbolsNeedingStabs,
1263                     [&](const SortingPair &a, const SortingPair &b) {
1264                       return a.second < b.second;
1265                     });
1266 
1267   // Emit STABS symbols so that dsymutil and/or the debugger can map address
1268   // regions in the final binary to the source and object files from which they
1269   // originated.
1270   InputFile *lastFile = nullptr;
1271   for (SortingPair &pair : symbolsNeedingStabs) {
1272     Defined *defined = pair.first;
1273     // We use 'originalIsec' of the symbol since we care about the actual origin
1274     // of the symbol, not the canonical location returned by `isec()`.
1275     Defined *funcBodySym = getFuncBodySym(defined);
1276     InputSection *isec = funcBodySym->originalIsec;
1277     ObjFile *file = cast<ObjFile>(isec->getFile());
1278 
1279     if (lastFile == nullptr || lastFile != file) {
1280       if (lastFile != nullptr)
1281         emitEndSourceStab();
1282       lastFile = file;
1283 
1284       emitBeginSourceStab(file->sourceFile());
1285       emitObjectFileStab(file);
1286     }
1287 
1288     StabsEntry symStab;
1289     symStab.sect = isec->parent->index;
1290     symStab.strx = stringTableSection.addString(defined->getName());
1291     symStab.value = funcBodySym->getVA();
1292 
1293     if (isCodeSection(isec)) {
1294       symStab.type = N_FUN;
1295       stabs.emplace_back(std::move(symStab));
1296       emitEndFunStab(funcBodySym);
1297     } else {
1298       symStab.type = defined->isExternal() ? N_GSYM : N_STSYM;
1299       stabs.emplace_back(std::move(symStab));
1300     }
1301   }
1302 
1303   if (!stabs.empty())
1304     emitEndSourceStab();
1305 }
1306 
1307 void SymtabSection::finalizeContents() {
1308   auto addSymbol = [&](std::vector<SymtabEntry> &symbols, Symbol *sym) {
1309     uint32_t strx = stringTableSection.addString(sym->getName());
1310     symbols.push_back({sym, strx});
1311   };
1312 
1313   std::function<void(Symbol *)> localSymbolsHandler;
1314   switch (config->localSymbolsPresence) {
1315   case SymtabPresence::All:
1316     localSymbolsHandler = [&](Symbol *sym) { addSymbol(localSymbols, sym); };
1317     break;
1318   case SymtabPresence::None:
1319     localSymbolsHandler = [&](Symbol *) { /* Do nothing*/ };
1320     break;
1321   case SymtabPresence::SelectivelyIncluded:
1322     localSymbolsHandler = [&](Symbol *sym) {
1323       if (config->localSymbolPatterns.match(sym->getName()))
1324         addSymbol(localSymbols, sym);
1325     };
1326     break;
1327   case SymtabPresence::SelectivelyExcluded:
1328     localSymbolsHandler = [&](Symbol *sym) {
1329       if (!config->localSymbolPatterns.match(sym->getName()))
1330         addSymbol(localSymbols, sym);
1331     };
1332     break;
1333   }
1334 
1335   // Local symbols aren't in the SymbolTable, so we walk the list of object
1336   // files to gather them.
1337   // But if `-x` is set, then we don't need to. localSymbolsHandler() will do
1338   // the right thing regardless, but this check is a perf optimization because
1339   // iterating through all the input files and their symbols is expensive.
1340   if (config->localSymbolsPresence != SymtabPresence::None) {
1341     for (const InputFile *file : inputFiles) {
1342       if (auto *objFile = dyn_cast<ObjFile>(file)) {
1343         for (Symbol *sym : objFile->symbols) {
1344           if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
1345             if (defined->isExternal() || !defined->isLive() ||
1346                 !defined->includeInSymtab)
1347               continue;
1348             localSymbolsHandler(sym);
1349           }
1350         }
1351       }
1352     }
1353   }
1354 
1355   // __dyld_private is a local symbol too. It's linker-created and doesn't
1356   // exist in any object file.
1357   if (in.stubHelper && in.stubHelper->dyldPrivate)
1358     localSymbolsHandler(in.stubHelper->dyldPrivate);
1359 
1360   for (Symbol *sym : symtab->getSymbols()) {
1361     if (!sym->isLive())
1362       continue;
1363     if (auto *defined = dyn_cast<Defined>(sym)) {
1364       if (!defined->includeInSymtab)
1365         continue;
1366       assert(defined->isExternal());
1367       if (defined->privateExtern)
1368         localSymbolsHandler(defined);
1369       else
1370         addSymbol(externalSymbols, defined);
1371     } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
1372       if (dysym->isReferenced())
1373         addSymbol(undefinedSymbols, sym);
1374     }
1375   }
1376 
1377   emitStabs();
1378   uint32_t symtabIndex = stabs.size();
1379   for (const SymtabEntry &entry :
1380        concat<SymtabEntry>(localSymbols, externalSymbols, undefinedSymbols)) {
1381     entry.sym->symtabIndex = symtabIndex++;
1382   }
1383 }
1384 
1385 uint32_t SymtabSection::getNumSymbols() const {
1386   return stabs.size() + localSymbols.size() + externalSymbols.size() +
1387          undefinedSymbols.size();
1388 }
1389 
1390 // This serves to hide (type-erase) the template parameter from SymtabSection.
1391 template <class LP> class SymtabSectionImpl final : public SymtabSection {
1392 public:
1393   SymtabSectionImpl(StringTableSection &stringTableSection)
1394       : SymtabSection(stringTableSection) {}
1395   uint64_t getRawSize() const override;
1396   void writeTo(uint8_t *buf) const override;
1397 };
1398 
1399 template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const {
1400   return getNumSymbols() * sizeof(typename LP::nlist);
1401 }
1402 
1403 template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const {
1404   auto *nList = reinterpret_cast<typename LP::nlist *>(buf);
1405   // Emit the stabs entries before the "real" symbols. We cannot emit them
1406   // after as that would render Symbol::symtabIndex inaccurate.
1407   for (const StabsEntry &entry : stabs) {
1408     nList->n_strx = entry.strx;
1409     nList->n_type = entry.type;
1410     nList->n_sect = entry.sect;
1411     nList->n_desc = entry.desc;
1412     nList->n_value = entry.value;
1413     ++nList;
1414   }
1415 
1416   for (const SymtabEntry &entry : concat<const SymtabEntry>(
1417            localSymbols, externalSymbols, undefinedSymbols)) {
1418     nList->n_strx = entry.strx;
1419     // TODO populate n_desc with more flags
1420     if (auto *defined = dyn_cast<Defined>(entry.sym)) {
1421       uint8_t scope = 0;
1422       if (defined->privateExtern) {
1423         // Private external -- dylib scoped symbol.
1424         // Promoted to non-external at link time.
1425         scope = N_PEXT;
1426       } else if (defined->isExternal()) {
1427         // Normal global symbol.
1428         scope = N_EXT;
1429       } else {
1430         // TU-local symbol from localSymbols.
1431         scope = 0;
1432       }
1433 
1434       if (defined->isAbsolute()) {
1435         nList->n_type = scope | N_ABS;
1436         nList->n_sect = NO_SECT;
1437         nList->n_value = defined->value;
1438       } else {
1439         nList->n_type = scope | N_SECT;
1440         nList->n_sect = defined->isec()->parent->index;
1441         // For the N_SECT symbol type, n_value is the address of the symbol
1442         nList->n_value = defined->getVA();
1443       }
1444       nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0;
1445       nList->n_desc |=
1446           defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0;
1447     } else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) {
1448       uint16_t n_desc = nList->n_desc;
1449       int16_t ordinal = ordinalForDylibSymbol(*dysym);
1450       if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP)
1451         SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL);
1452       else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE)
1453         SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL);
1454       else {
1455         assert(ordinal > 0);
1456         SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal));
1457       }
1458 
1459       nList->n_type = N_EXT;
1460       n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0;
1461       n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0;
1462       nList->n_desc = n_desc;
1463     }
1464     ++nList;
1465   }
1466 }
1467 
1468 template <class LP>
1469 SymtabSection *
1470 macho::makeSymtabSection(StringTableSection &stringTableSection) {
1471   return make<SymtabSectionImpl<LP>>(stringTableSection);
1472 }
1473 
1474 IndirectSymtabSection::IndirectSymtabSection()
1475     : LinkEditSection(segment_names::linkEdit,
1476                       section_names::indirectSymbolTable) {}
1477 
1478 uint32_t IndirectSymtabSection::getNumSymbols() const {
1479   uint32_t size = in.got->getEntries().size() +
1480                   in.tlvPointers->getEntries().size() +
1481                   in.stubs->getEntries().size();
1482   if (!config->emitChainedFixups)
1483     size += in.stubs->getEntries().size();
1484   return size;
1485 }
1486 
1487 bool IndirectSymtabSection::isNeeded() const {
1488   return in.got->isNeeded() || in.tlvPointers->isNeeded() ||
1489          in.stubs->isNeeded();
1490 }
1491 
1492 void IndirectSymtabSection::finalizeContents() {
1493   uint32_t off = 0;
1494   in.got->reserved1 = off;
1495   off += in.got->getEntries().size();
1496   in.tlvPointers->reserved1 = off;
1497   off += in.tlvPointers->getEntries().size();
1498   in.stubs->reserved1 = off;
1499   if (in.lazyPointers) {
1500     off += in.stubs->getEntries().size();
1501     in.lazyPointers->reserved1 = off;
1502   }
1503 }
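
// For illustration (made-up entry counts): with 2 GOT entries, 1 TLV pointer,
// and 3 stubs, and with lazy pointers present, finalizeContents() above
// assigns
//   in.got->reserved1          = 0
//   in.tlvPointers->reserved1  = 2
//   in.stubs->reserved1        = 3
//   in.lazyPointers->reserved1 = 6
// matching the order in which writeTo() below emits the indirect entries.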
1504 
1505 static uint32_t indirectValue(const Symbol *sym) {
1506   if (sym->symtabIndex == UINT32_MAX || !needsBinding(sym))
1507     return INDIRECT_SYMBOL_LOCAL;
1508   return sym->symtabIndex;
1509 }
1510 
1511 void IndirectSymtabSection::writeTo(uint8_t *buf) const {
1512   uint32_t off = 0;
1513   for (const Symbol *sym : in.got->getEntries()) {
1514     write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
1515     ++off;
1516   }
1517   for (const Symbol *sym : in.tlvPointers->getEntries()) {
1518     write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
1519     ++off;
1520   }
1521   for (const Symbol *sym : in.stubs->getEntries()) {
1522     write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
1523     ++off;
1524   }
1525 
1526   if (in.lazyPointers) {
1527     // There is a 1:1 correspondence between stubs and LazyPointerSection
1528     // entries. But giving __stubs and __la_symbol_ptr the same reserved1
1529     // (the offset into the indirect symbol table), so that they both refer
1530     // to the same range of offsets, confuses `strip`; hence we write the
1531     // stubs' symbol table offsets a second time.
1532     for (const Symbol *sym : in.stubs->getEntries()) {
1533       write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
1534       ++off;
1535     }
1536   }
1537 }
1538 
1539 StringTableSection::StringTableSection()
1540     : LinkEditSection(segment_names::linkEdit, section_names::stringTable) {}
1541 
1542 uint32_t StringTableSection::addString(StringRef str) {
1543   uint32_t strx = size;
1544   if (config->dedupSymbolStrings) {
1545     llvm::CachedHashStringRef hashedStr(str);
1546     auto [it, inserted] = stringMap.try_emplace(hashedStr, strx);
1547     if (!inserted)
1548       return it->second;
1549   }
1550 
1551   strings.push_back(str);
1552   size += str.size() + 1; // account for null terminator
1553   return strx;
1554 }
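
// For example, if the table currently holds `size` == 2 bytes, then
// addString("foo") returns strx 2 and grows the table to 6 bytes ("foo" plus
// its null terminator). With config->dedupSymbolStrings set, a later
// addString("foo") hits the stringMap and returns the same strx 2 without
// growing the table.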
1555 
1556 void StringTableSection::writeTo(uint8_t *buf) const {
1557   uint32_t off = 0;
1558   for (StringRef str : strings) {
1559     memcpy(buf + off, str.data(), str.size());
1560     off += str.size() + 1; // account for null terminator
1561   }
1562 }
1563 
1564 static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0);
1565 static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0);
1566 
1567 CodeSignatureSection::CodeSignatureSection()
1568     : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) {
1569   align = 16; // required by libstuff
1570 
1571   // XXX: This mimics ld64, which uses the install name as the codesign
1572   // identifier, if available.
1573   if (!config->installName.empty())
1574     fileName = config->installName;
1575   else
1576     // FIXME: Consider using finalOutput instead of outputFile.
1577     fileName = config->outputFile;
1578 
1579   size_t slashIndex = fileName.rfind("/");
1580   if (slashIndex != std::string::npos)
1581     fileName = fileName.drop_front(slashIndex + 1);
1582 
1583   // NOTE: Any changes to these calculations should be repeated
1584   // in llvm-objcopy's MachOLayoutBuilder::layoutTail.
1585   allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1);
1586   fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size();
1587 }
1588 
1589 uint32_t CodeSignatureSection::getBlockCount() const {
1590   return (fileOff + blockSize - 1) / blockSize;
1591 }
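
// E.g., assuming the usual 4 KiB blockSize (blockSizeShift == 12), a fileOff
// of 0x2001 (8193) bytes yields (8193 + 4095) / 4096 == 3 hash blocks: two
// full blocks plus one byte spilling into a third.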
1592 
1593 uint64_t CodeSignatureSection::getRawSize() const {
1594   return allHeadersSize + getBlockCount() * hashSize;
1595 }
1596 
1597 void CodeSignatureSection::writeHashes(uint8_t *buf) const {
1598   // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
1599   // MachOWriter::writeSignatureData.
1600   uint8_t *hashes = buf + fileOff + allHeadersSize;
1601   parallelFor(0, getBlockCount(), [&](size_t i) {
1602     sha256(buf + i * blockSize,
1603            std::min(static_cast<size_t>(fileOff - i * blockSize), blockSize),
1604            hashes + i * hashSize);
1605   });
1606 #if defined(__APPLE__)
1607   // This is a macOS-specific work-around and makes no sense for any
1608   // other host OS. See https://openradar.appspot.com/FB8914231
1609   //
1610   // The macOS kernel maintains a signature-verification cache to
1611   // quickly validate applications at time of execve(2).  The trouble
1612   // is that the kernel creates the cache entry at the time of the
1613   // mmap(2) call, before we have a chance to write either the code to
1614   // sign or the signature header+hashes.  The fix is to invalidate
1615   // all cached data associated with the output file, thus discarding
1616   // the bogus prematurely-cached signature.
1617   msync(buf, fileOff + getSize(), MS_INVALIDATE);
1618 #endif
1619 }
1620 
1621 void CodeSignatureSection::writeTo(uint8_t *buf) const {
1622   // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
1623   // MachOWriter::writeSignatureData.
1624   uint32_t signatureSize = static_cast<uint32_t>(getSize());
1625   auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf);
1626   write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE);
1627   write32be(&superBlob->length, signatureSize);
1628   write32be(&superBlob->count, 1);
1629   auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]);
1630   write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY);
1631   write32be(&blobIndex->offset, blobHeadersSize);
1632   auto *codeDirectory =
1633       reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize);
1634   write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY);
1635   write32be(&codeDirectory->length, signatureSize - blobHeadersSize);
1636   write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG);
1637   write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED);
1638   write32be(&codeDirectory->hashOffset,
1639             sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad);
1640   write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory));
1641   codeDirectory->nSpecialSlots = 0;
1642   write32be(&codeDirectory->nCodeSlots, getBlockCount());
1643   write32be(&codeDirectory->codeLimit, fileOff);
1644   codeDirectory->hashSize = static_cast<uint8_t>(hashSize);
1645   codeDirectory->hashType = kSecCodeSignatureHashSHA256;
1646   codeDirectory->platform = 0;
1647   codeDirectory->pageSize = blockSizeShift;
1648   codeDirectory->spare2 = 0;
1649   codeDirectory->scatterOffset = 0;
1650   codeDirectory->teamOffset = 0;
1651   codeDirectory->spare3 = 0;
1652   codeDirectory->codeLimit64 = 0;
1653   OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text);
1654   write64be(&codeDirectory->execSegBase, textSeg->fileOff);
1655   write64be(&codeDirectory->execSegLimit, textSeg->fileSize);
1656   write64be(&codeDirectory->execSegFlags,
1657             config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0);
1658   auto *id = reinterpret_cast<char *>(&codeDirectory[1]);
1659   memcpy(id, fileName.begin(), fileName.size());
1660   memset(id + fileName.size(), 0, fileNamePad);
1661 }
1662 
1663 CStringSection::CStringSection(const char *name)
1664     : SyntheticSection(segment_names::text, name) {
1665   flags = S_CSTRING_LITERALS;
1666 }
1667 
1668 void CStringSection::addInput(CStringInputSection *isec) {
1669   isec->parent = this;
1670   inputs.push_back(isec);
1671   if (isec->align > align)
1672     align = isec->align;
1673 }
1674 
1675 void CStringSection::writeTo(uint8_t *buf) const {
1676   for (const CStringInputSection *isec : inputs) {
1677     for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
1678       if (!piece.live)
1679         continue;
1680       StringRef string = isec->getStringRef(i);
1681       memcpy(buf + piece.outSecOff, string.data(), string.size());
1682     }
1683   }
1684 }
1685 
1686 void CStringSection::finalizeContents() {
1687   uint64_t offset = 0;
1688   for (CStringInputSection *isec : inputs) {
1689     for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
1690       if (!piece.live)
1691         continue;
1692       // See comment above DeduplicatedCStringSection for how alignment is
1693       // handled.
1694       uint32_t pieceAlign = 1
1695                             << llvm::countr_zero(isec->align | piece.inSecOff);
1696       offset = alignToPowerOf2(offset, pieceAlign);
1697       piece.outSecOff = offset;
1698       isec->isFinal = true;
1699       StringRef string = isec->getStringRef(i);
1700       offset += string.size() + 1; // account for null terminator
1701     }
1702   }
1703   size = offset;
1704 }
1705 
1706 // Mergeable cstring literals are found under the __TEXT,__cstring section. In
1707 // contrast to ELF, which puts strings that need different alignments into
1708 // different sections, clang's Mach-O backend puts them all in one section.
1709 // Strings that need to be aligned have the .p2align directive emitted before
1710 // them, which simply translates into zero padding in the object file. In other
1711 // words, we have to infer the desired alignment of these cstrings from their
1712 // addresses.
1713 //
1714 // We differ slightly from ld64 in how we've chosen to align these cstrings.
1715 // Both LLD and ld64 preserve the number of trailing zeros in each cstring's
1716 // address in the input object files. When deduplicating identical cstrings,
1717 // both linkers pick the cstring whose address has more trailing zeros, and
1718 // preserve the alignment of that address in the final binary. However, ld64
1719 // goes a step further and also preserves the offset of the cstring from the
1720 // last section-aligned address.  I.e. if a cstring is at offset 18 in the
1721 // input, with a section alignment of 16, then both LLD and ld64 will ensure the
1722 // final address is 2-byte aligned (since 18 == 16 + 2). But ld64 will also
1723 // ensure that the final address is of the form 16 * k + 2 for some k.
1724 //
1725 // Note that ld64's heuristic means that a dedup'ed cstring's final address is
1726 // dependent on the order of the input object files. E.g. if in addition to the
1727 // cstring at offset 18 above, we have a duplicate one in another file with a
1728 // `.cstring` section alignment of 2 and an offset of zero, then ld64 will pick
1729 // the cstring from the object file earlier on the command line (since both have
1730 // the same number of trailing zeros in their address). So the final cstring may
1731 // either be at some address `16 * k + 2` or at some address `2 * k`.
1732 //
1733 // I've opted not to follow this behavior primarily for implementation
1734 // simplicity, and secondarily to save a few more bytes. It's not clear to me
1735 // that preserving the section alignment + offset is ever necessary, and there
1736 // are many cases that are clearly redundant. In particular, if an x86_64 object
1737 // file contains some strings that are accessed via SIMD instructions, then the
1738 // .cstring section in the object file will be 16-byte-aligned (since SIMD
1739 // requires its operand addresses to be 16-byte aligned). However, there will
1740 // typically also be other cstrings in the same file that aren't used via SIMD
1741 // and don't need this alignment. They will be emitted at some arbitrary address
1742 // `A`, but ld64 will treat them as being 16-byte aligned with an offset of
1743 // `A % 16`.
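//
// Concretely, for the offset-18 example above: align == 16 and
// piece.inSecOff == 18 give countr_zero(16 | 18) == countr_zero(0b10010) == 1,
// so the string lands at a 2-byte-aligned output offset; the 16-byte section
// alignment itself is deliberately not preserved.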
1744 void DeduplicatedCStringSection::finalizeContents() {
1745   // Find the largest alignment required for each string.
1746   for (const CStringInputSection *isec : inputs) {
1747     for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
1748       if (!piece.live)
1749         continue;
1750       auto s = isec->getCachedHashStringRef(i);
1751       assert(isec->align != 0);
1752       uint8_t trailingZeros = llvm::countr_zero(isec->align | piece.inSecOff);
1753       auto it = stringOffsetMap.insert(
1754           std::make_pair(s, StringOffset(trailingZeros)));
1755       if (!it.second && it.first->second.trailingZeros < trailingZeros)
1756         it.first->second.trailingZeros = trailingZeros;
1757     }
1758   }
1759 
1760   // Assign an offset for each string and save it to the corresponding
1761   // StringPieces for easy access.
1762   for (CStringInputSection *isec : inputs) {
1763     for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
1764       if (!piece.live)
1765         continue;
1766       auto s = isec->getCachedHashStringRef(i);
1767       auto it = stringOffsetMap.find(s);
1768       assert(it != stringOffsetMap.end());
1769       StringOffset &offsetInfo = it->second;
1770       if (offsetInfo.outSecOff == UINT64_MAX) {
1771         offsetInfo.outSecOff =
1772             alignToPowerOf2(size, 1ULL << offsetInfo.trailingZeros);
1773         size =
1774             offsetInfo.outSecOff + s.size() + 1; // account for null terminator
1775       }
1776       piece.outSecOff = offsetInfo.outSecOff;
1777     }
1778     isec->isFinal = true;
1779   }
1780 }
1781 
1782 void DeduplicatedCStringSection::writeTo(uint8_t *buf) const {
1783   for (const auto &p : stringOffsetMap) {
1784     StringRef data = p.first.val();
1785     uint64_t off = p.second.outSecOff;
1786     if (!data.empty())
1787       memcpy(buf + off, data.data(), data.size());
1788   }
1789 }
1790 
1791 DeduplicatedCStringSection::StringOffset
1792 DeduplicatedCStringSection::getStringOffset(StringRef str) const {
1793   // StringPiece uses 31 bits to store the hashes, so we replicate that
1794   uint32_t hash = xxh3_64bits(str) & 0x7fffffff;
1795   auto offset = stringOffsetMap.find(CachedHashStringRef(str, hash));
1796   assert(offset != stringOffsetMap.end() &&
1797          "Looked-up strings should always exist in section");
1798   return offset->second;
1799 }
1800 
1801 // This section is actually emitted as __TEXT,__const by ld64, but clang may
1802 // emit input sections of that name, and LLD doesn't currently support mixing
1803 // synthetic and concat-type OutputSections. To work around this, I've given
1804 // our merged-literals section a different name.
1805 WordLiteralSection::WordLiteralSection()
1806     : SyntheticSection(segment_names::text, section_names::literals) {
1807   align = 16;
1808 }
1809 
1810 void WordLiteralSection::addInput(WordLiteralInputSection *isec) {
1811   isec->parent = this;
1812   inputs.push_back(isec);
1813 }
1814 
1815 void WordLiteralSection::finalizeContents() {
1816   for (WordLiteralInputSection *isec : inputs) {
1817     // We do all processing of the InputSection here, so it will be effectively
1818     // finalized.
1819     isec->isFinal = true;
1820     const uint8_t *buf = isec->data.data();
1821     switch (sectionType(isec->getFlags())) {
1822     case S_4BYTE_LITERALS: {
1823       for (size_t off = 0, e = isec->data.size(); off < e; off += 4) {
1824         if (!isec->isLive(off))
1825           continue;
1826         uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off);
1827         literal4Map.emplace(value, literal4Map.size());
1828       }
1829       break;
1830     }
1831     case S_8BYTE_LITERALS: {
1832       for (size_t off = 0, e = isec->data.size(); off < e; off += 8) {
1833         if (!isec->isLive(off))
1834           continue;
1835         uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off);
1836         literal8Map.emplace(value, literal8Map.size());
1837       }
1838       break;
1839     }
1840     case S_16BYTE_LITERALS: {
1841       for (size_t off = 0, e = isec->data.size(); off < e; off += 16) {
1842         if (!isec->isLive(off))
1843           continue;
1844         UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off);
1845         literal16Map.emplace(value, literal16Map.size());
1846       }
1847       break;
1848     }
1849     default:
1850       llvm_unreachable("invalid literal section type");
1851     }
1852   }
1853 }
1854 
1855 void WordLiteralSection::writeTo(uint8_t *buf) const {
1856   // Note that we don't attempt to do any endianness conversion in addInput(),
1857   // so we don't do it here either -- just write out the original value,
1858   // byte-for-byte.
1859   for (const auto &p : literal16Map)
1860     memcpy(buf + p.second * 16, &p.first, 16);
1861   buf += literal16Map.size() * 16;
1862 
1863   for (const auto &p : literal8Map)
1864     memcpy(buf + p.second * 8, &p.first, 8);
1865   buf += literal8Map.size() * 8;
1866 
1867   for (const auto &p : literal4Map)
1868     memcpy(buf + p.second * 4, &p.first, 4);
1869 }
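
// For illustration (made-up counts): with two 16-byte, one 8-byte, and three
// 4-byte literals, writeTo() above places them at section offsets
//   16-byte literals: 0, 16
//   8-byte literal:   32
//   4-byte literals:  40, 44, 48
// i.e. the largest literals come first, so every literal stays naturally
// aligned within this 16-byte-aligned section.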
1870 
1871 ObjCImageInfoSection::ObjCImageInfoSection()
1872     : SyntheticSection(segment_names::data, section_names::objCImageInfo) {}
1873 
1874 ObjCImageInfoSection::ImageInfo
1875 ObjCImageInfoSection::parseImageInfo(const InputFile *file) {
1876   ImageInfo info;
1877   ArrayRef<uint8_t> data = file->objCImageInfo;
1878   // The image info struct has the following layout:
1879   // struct {
1880   //   uint32_t version;
1881   //   uint32_t flags;
1882   // };
1883   if (data.size() < 8) {
1884     warn(toString(file) + ": invalid __objc_imageinfo size");
1885     return info;
1886   }
1887 
1888   auto *buf = reinterpret_cast<const uint32_t *>(data.data());
1889   if (read32le(buf) != 0) {
1890     warn(toString(file) + ": invalid __objc_imageinfo version");
1891     return info;
1892   }
1893 
1894   uint32_t flags = read32le(buf + 1);
1895   info.swiftVersion = (flags >> 8) & 0xff;
1896   info.hasCategoryClassProperties = flags & 0x40;
1897   return info;
1898 }
1899 
1900 static std::string swiftVersionString(uint8_t version) {
1901   switch (version) {
1902     case 1:
1903       return "1.0";
1904     case 2:
1905       return "1.1";
1906     case 3:
1907       return "2.0";
1908     case 4:
1909       return "3.0";
1910     case 5:
1911       return "4.0";
1912     default:
1913       return ("0x" + Twine::utohexstr(version)).str();
1914   }
1915 }
1916 
1917 // Validate each object file's __objc_imageinfo and use them to generate the
1918 // image info for the output binary. Only two pieces of info are relevant:
1919 // 1. The Swift version (should be identical across inputs)
1920 // 2. `bool hasCategoryClassProperties` (true only if true for all inputs)
1921 void ObjCImageInfoSection::finalizeContents() {
1922   assert(files.size() != 0); // should have already been checked via isNeeded()
1923 
1924   info.hasCategoryClassProperties = true;
1925   const InputFile *firstFile = nullptr;
1926   for (const InputFile *file : files) {
1927     ImageInfo inputInfo = parseImageInfo(file);
1928     info.hasCategoryClassProperties &= inputInfo.hasCategoryClassProperties;
1929 
1930     // swiftVersion 0 means no Swift is present, so no version checking required
1931     if (inputInfo.swiftVersion == 0)
1932       continue;
1933 
1934     if (info.swiftVersion != 0 && info.swiftVersion != inputInfo.swiftVersion) {
1935       error("Swift version mismatch: " + toString(firstFile) + " has version " +
1936             swiftVersionString(info.swiftVersion) + " but " + toString(file) +
1937             " has version " + swiftVersionString(inputInfo.swiftVersion));
1938     } else {
1939       info.swiftVersion = inputInfo.swiftVersion;
1940       firstFile = file;
1941     }
1942   }
1943 }
1944 
1945 void ObjCImageInfoSection::writeTo(uint8_t *buf) const {
1946   uint32_t flags = info.hasCategoryClassProperties ? 0x40 : 0x0;
1947   flags |= info.swiftVersion << 8;
1948   write32le(buf + 4, flags);
1949 }
1950 
1951 InitOffsetsSection::InitOffsetsSection()
1952     : SyntheticSection(segment_names::text, section_names::initOffsets) {
1953   flags = S_INIT_FUNC_OFFSETS;
1954   align = 4; // This section contains 32-bit integers.
1955 }
1956 
1957 uint64_t InitOffsetsSection::getSize() const {
1958   size_t count = 0;
1959   for (const ConcatInputSection *isec : sections)
1960     count += isec->relocs.size();
1961   return count * sizeof(uint32_t);
1962 }
1963 
1964 void InitOffsetsSection::writeTo(uint8_t *buf) const {
1965   // FIXME: Add function specified by -init when that argument is implemented.
1966   for (ConcatInputSection *isec : sections) {
1967     for (const Reloc &rel : isec->relocs) {
1968       const Symbol *referent = cast<Symbol *>(rel.referent);
1969       assert(referent && "section relocation should have been rejected");
1970       uint64_t offset = referent->getVA() - in.header->addr;
1971       // FIXME: Can we handle this gracefully?
1972       if (offset > UINT32_MAX)
1973         fatal(isec->getLocation(rel.offset) + ": offset to initializer " +
1974               referent->getName() + " (" + utohexstr(offset) +
1975               ") does not fit in 32 bits");
1976 
1977       // Entries need to be added in the order they appear in the section, but
1978       // relocations aren't guaranteed to be sorted.
1979       size_t index = rel.offset >> target->p2WordSize;
1980       write32le(&buf[index * sizeof(uint32_t)], offset);
1981     }
1982     buf += isec->relocs.size() * sizeof(uint32_t);
1983   }
1984 }
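
// For illustration, on a 64-bit target (p2WordSize == 3) a relocation at
// rel.offset == 16 covers the third pointer slot of __mod_init_func, so
// index == 16 >> 3 == 2 and the 32-bit offset lands at buf + 8, preserving
// the input order even when the relocations arrive unsorted.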
1985 
1986 // The inputs are __mod_init_func sections, which contain pointers to
1987 // initializer functions, therefore all relocations should be of the UNSIGNED
1988 // type. InitOffsetsSection stores offsets, so if the initializer's address is
1989 // not known at link time, stub-indirection has to be used.
1990 void InitOffsetsSection::setUp() {
1991   for (const ConcatInputSection *isec : sections) {
1992     for (const Reloc &rel : isec->relocs) {
1993       RelocAttrs attrs = target->getRelocAttrs(rel.type);
1994       if (!attrs.hasAttr(RelocAttrBits::UNSIGNED))
1995         error(isec->getLocation(rel.offset) +
1996               ": unsupported relocation type: " + attrs.name);
1997       if (rel.addend != 0)
1998         error(isec->getLocation(rel.offset) +
1999               ": relocation addend is not representable in __init_offsets");
2000       if (isa<InputSection *>(rel.referent))
2001         error(isec->getLocation(rel.offset) +
2002               ": unexpected section relocation");
2003 
2004       Symbol *sym = rel.referent.dyn_cast<Symbol *>();
2005       if (auto *undefined = dyn_cast<Undefined>(sym))
2006         treatUndefinedSymbol(*undefined, isec, rel.offset);
2007       if (needsBinding(sym))
2008         in.stubs->addEntry(sym);
2009     }
2010   }
2011 }
2012 
2013 ObjCMethListSection::ObjCMethListSection()
2014     : SyntheticSection(segment_names::text, section_names::objcMethList) {
2015   flags = S_ATTR_NO_DEAD_STRIP;
2016   align = relativeOffsetSize;
2017 }
2018 
2019 // Go through all input method lists and ensure that we have selrefs for all
2020 // their method names. The selrefs will be needed later by ::writeTo. We
2021 // create them early here to ensure they are processed correctly by the lld
2022 // pipeline.
2023 void ObjCMethListSection::setUp() {
2024   for (const ConcatInputSection *isec : inputs) {
2025     uint32_t structSizeAndFlags = 0, structCount = 0;
2026     readMethodListHeader(isec->data.data(), structSizeAndFlags, structCount);
2027     uint32_t originalStructSize = structSizeAndFlags & structSizeMask;
2028     // Method name is immediately after header
2029     uint32_t methodNameOff = methodListHeaderSize;
2030 
2031     // Loop through all methods, and ensure a selref for each of them exists.
2032     while (methodNameOff < isec->data.size()) {
2033       const Reloc *reloc = isec->getRelocAt(methodNameOff);
2034       assert(reloc && "Relocation expected at method list name slot");
2035 
2036       StringRef methname = reloc->getReferentString();
2037       if (!ObjCSelRefsHelper::getSelRef(methname))
2038         ObjCSelRefsHelper::makeSelRef(methname);
2039 
2040       // Jump to method name offset in next struct
2041       methodNameOff += originalStructSize;
2042     }
2043   }
2044 }
2045 
2046 // Calculate the section size and the final offsets at which each
2047 // InputSection needs to be written.
2048 void ObjCMethListSection::finalize() {
2049   // sectionSize will be the total size of the __objc_methlist section
2050   sectionSize = 0;
2051   for (ConcatInputSection *isec : inputs) {
2052     // We can also use sectionSize as the write offset for isec.
2053     assert(sectionSize == alignToPowerOf2(sectionSize, relativeOffsetSize) &&
2054            "expected __objc_methlist to be aligned by default with the "
2055            "required section alignment");
2056     isec->outSecOff = sectionSize;
2057 
2058     isec->isFinal = true;
2059     uint32_t relativeListSize =
2060         computeRelativeMethodListSize(isec->data.size());
2061     sectionSize += relativeListSize;
2062 
2063     // If encoding the method list in relative offset format shrinks the size,
2064     // then we also need to adjust symbol sizes to match the new size. Note that
2065     // on 32-bit platforms the size of the method list will remain the same when
2066     // encoded in relative offset format.
2067     if (relativeListSize != isec->data.size()) {
2068       for (Symbol *sym : isec->symbols) {
2069         assert(isa<Defined>(sym) &&
2070                "Unexpected undefined symbol in ObjC method list");
2071         auto *def = cast<Defined>(sym);
2072         // There can be 0-size symbols, check if this is the case and ignore
2073         // them.
2074         if (def->size) {
2075           assert(
2076               def->size == isec->data.size() &&
2077               "Invalid ObjC method list symbol size: expected symbol size to "
2078               "match isec size");
2079           def->size = relativeListSize;
2080         }
2081       }
2082     }
2083   }
2084 }
2085 
2086 void ObjCMethListSection::writeTo(uint8_t *bufStart) const {
2087   uint8_t *buf = bufStart;
2088   for (const ConcatInputSection *isec : inputs) {
2089     assert(buf - bufStart == std::ptrdiff_t(isec->outSecOff) &&
2090            "Writing at unexpected offset");
2091     uint32_t writtenSize = writeRelativeMethodList(isec, buf);
2092     buf += writtenSize;
2093   }
2094   assert(buf - bufStart == std::ptrdiff_t(sectionSize) &&
2095          "Written size does not match expected section size");
2096 }
2097 
2098 // Check if an InputSection is a method list. To do this we scan the
2099 // InputSection for any symbols whose names match the patterns we expect clang
2100 // to generate for method lists.
2101 bool ObjCMethListSection::isMethodList(const ConcatInputSection *isec) {
2102   const char *symPrefixes[] = {objc::symbol_names::classMethods,
2103                                objc::symbol_names::instanceMethods,
2104                                objc::symbol_names::categoryInstanceMethods,
2105                                objc::symbol_names::categoryClassMethods};
2106   if (!isec)
2107     return false;
2108   for (const Symbol *sym : isec->symbols) {
2109     auto *def = dyn_cast_or_null<Defined>(sym);
2110     if (!def)
2111       continue;
2112     for (const char *prefix : symPrefixes) {
2113       if (def->getName().starts_with(prefix)) {
2114         assert(def->size == isec->data.size() &&
2115                "Invalid ObjC method list symbol size: expected symbol size to "
2116                "match isec size");
2117         assert(def->value == 0 &&
2118                "Offset of ObjC method list symbol must be 0");
2119         return true;
2120       }
2121     }
2122   }
2123 
2124   return false;
2125 }
2126 
2127 // Encode a single relative offset value. The input is the data/symbol at
2128 // (&isec->data[inSecOff]). The output is written to (&buf[outSecOff]).
2129 // 'useSelRef' indicates that we should not use the referent symbol directly,
2130 // but instead look up the selRef for the symbol's name and use that.
2131 void ObjCMethListSection::writeRelativeOffsetForIsec(
2132     const ConcatInputSection *isec, uint8_t *buf, uint32_t &inSecOff,
2133     uint32_t &outSecOff, bool useSelRef) const {
2134   const Reloc *reloc = isec->getRelocAt(inSecOff);
2135   assert(reloc && "Relocation expected at __objc_methlist Offset");
2136 
2137   uint32_t symVA = 0;
2138   if (useSelRef) {
2139     StringRef methname = reloc->getReferentString();
2140     ConcatInputSection *selRef = ObjCSelRefsHelper::getSelRef(methname);
2141     assert(selRef && "Expected all selector names to already be present "
2142                      "in __objc_selrefs");
2143     symVA = selRef->getVA();
2144     assert(selRef->data.size() == target->wordSize &&
2145            "Expected one selref per ConcatInputSection");
2146   } else if (auto *sym = dyn_cast<Symbol *>(reloc->referent)) {
2147     auto *def = dyn_cast_or_null<Defined>(sym);
2148     assert(def && "Expected all syms in __objc_methlist to be defined");
2149     symVA = def->getVA();
2150   } else {
2151     auto *isec = cast<InputSection *>(reloc->referent);
2152     symVA = isec->getVA(reloc->addend);
2153   }
2154 
2155   uint32_t currentVA = isec->getVA() + outSecOff;
2156   uint32_t delta = symVA - currentVA;
2157   write32le(buf + outSecOff, delta);
2158 
2159   // Move one pointer forward in the absolute method list
2160   inSecOff += target->wordSize;
2161   // Move one relative offset forward in the relative method list (32 bits)
2162   outSecOff += relativeOffsetSize;
2163 }
2164 
2165 // Write a relative method list to buf and return the number of bytes
2166 // written.
2167 uint32_t
2168 ObjCMethListSection::writeRelativeMethodList(const ConcatInputSection *isec,
2169                                              uint8_t *buf) const {
2170   // Copy over the header, and add the "this is a relative method list" magic
2171   // value flag
2172   uint32_t structSizeAndFlags = 0, structCount = 0;
2173   readMethodListHeader(isec->data.data(), structSizeAndFlags, structCount);
2174   // Set the struct size for the relative method list
2175   uint32_t relativeStructSizeAndFlags =
2176       (relativeOffsetSize * pointersPerStruct) & structSizeMask;
2177   // Carry over the old flags from the input struct
2178   relativeStructSizeAndFlags |= structSizeAndFlags & structFlagsMask;
2179   // Set the relative method list flag
2180   relativeStructSizeAndFlags |= relMethodHeaderFlag;
2181 
2182   writeMethodListHeader(buf, relativeStructSizeAndFlags, structCount);
2183 
2184   assert(methodListHeaderSize +
2185                  (structCount * pointersPerStruct * target->wordSize) ==
2186              isec->data.size() &&
2187          "Invalid computed ObjC method list size");
2188 
2189   uint32_t inSecOff = methodListHeaderSize;
2190   uint32_t outSecOff = methodListHeaderSize;
2191 
2192   // Go through the method list and encode input absolute pointers as relative
2193   // offsets. writeRelativeOffsetForIsec will be incrementing inSecOff and
2194   // outSecOff
2195   for (uint32_t i = 0; i < structCount; i++) {
2196     // Write the name of the method
2197     writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, true);
2198     // Write the type of the method
2199     writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, false);
2200     // Write reference to the selector of the method
2201     writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, false);
2202   }
2203 
2204   // Expecting to have read all the data in the isec
2205   assert(inSecOff == isec->data.size() &&
2206          "Invalid actual ObjC method list size");
2207   assert(
2208       outSecOff == computeRelativeMethodListSize(inSecOff) &&
2209       "Mismatch between input & output size when writing relative method list");
2210   return outSecOff;
2211 }
2212 
2213 // Given the size of an ObjC method list InputSection, return the size of the
2214 // method list when encoded in relative offsets format. We can do this without
2215 // decoding the actual data, as it can be directly inferred from the size of the
2216 // isec.
2217 uint32_t ObjCMethListSection::computeRelativeMethodListSize(
2218     uint32_t absoluteMethodListSize) const {
2219   uint32_t oldPointersSize = absoluteMethodListSize - methodListHeaderSize;
2220   uint32_t pointerCount = oldPointersSize / target->wordSize;
2221   assert(((pointerCount % pointersPerStruct) == 0) &&
2222          "__objc_methlist expects method lists to have multiple-of-3 pointers");
2223 
2224   uint32_t newPointersSize = pointerCount * relativeOffsetSize;
2225   uint32_t newTotalSize = methodListHeaderSize + newPointersSize;
2226 
2227   assert((newTotalSize <= absoluteMethodListSize) &&
2228          "Expected relative method list size to be smaller or equal than "
2229          "original size");
2230   return newTotalSize;
2231 }
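
// For example, on a 64-bit target a list of 2 methods occupies
// 8 (header) + 2 * 3 * 8 == 56 bytes in absolute form, and this function
// returns 8 + 2 * 3 * 4 == 32 bytes for the relative form.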
2232 
2233 // Read a method list header from buf
2234 void ObjCMethListSection::readMethodListHeader(const uint8_t *buf,
2235                                                uint32_t &structSizeAndFlags,
2236                                                uint32_t &structCount) const {
2237   structSizeAndFlags = read32le(buf);
2238   structCount = read32le(buf + sizeof(uint32_t));
2239 }
2240 
2241 // Write a method list header to buf
2242 void ObjCMethListSection::writeMethodListHeader(uint8_t *buf,
2243                                                 uint32_t structSizeAndFlags,
2244                                                 uint32_t structCount) const {
2245   write32le(buf, structSizeAndFlags);
2246   write32le(buf + sizeof(structSizeAndFlags), structCount);
2247 }
2248 
2249 void macho::createSyntheticSymbols() {
2250   auto addHeaderSymbol = [](const char *name) {
2251     symtab->addSynthetic(name, in.header->isec, /*value=*/0,
2252                          /*isPrivateExtern=*/true, /*includeInSymtab=*/false,
2253                          /*referencedDynamically=*/false);
2254   };
2255 
2256   switch (config->outputType) {
2257     // FIXME: Assign the right address value for these symbols
2258     // (rather than 0). But we need to do that after assignAddresses().
2259   case MH_EXECUTE:
2260     // If linking PIE, __mh_execute_header is a defined symbol in
2261     // __TEXT,__text. Otherwise, it's an absolute symbol.
2263     if (config->isPic)
2264       symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
2265                            /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
2266                            /*referencedDynamically=*/true);
2267     else
2268       symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr, /*value=*/0,
2269                            /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
2270                            /*referencedDynamically=*/true);
2271     break;
2272 
2273     // The following symbols are N_SECT symbols, even though the header is not
2274     // part of any section and they are private to the bundle/dylib/object
2275     // they are part of.
2276   case MH_BUNDLE:
2277     addHeaderSymbol("__mh_bundle_header");
2278     break;
2279   case MH_DYLIB:
2280     addHeaderSymbol("__mh_dylib_header");
2281     break;
2282   case MH_DYLINKER:
2283     addHeaderSymbol("__mh_dylinker_header");
2284     break;
2285   case MH_OBJECT:
2286     addHeaderSymbol("__mh_object_header");
2287     break;
2288   default:
2289     llvm_unreachable("unexpected outputType");
2290     break;
2291   }
2292 
2293   // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit
2294   // which handles, e.g., cleanup of static global variables. The ABI document
2295   // says that the pointer can point to any address in one of the dylib's
2296   // segments, but in practice ld64 seems to set it to point to the header,
2297   // so that's what's implemented here.
2298   addHeaderSymbol("___dso_handle");
2299 }
2300 
2301 ChainedFixupsSection::ChainedFixupsSection()
2302     : LinkEditSection(segment_names::linkEdit, section_names::chainFixups) {}
2303 
2304 bool ChainedFixupsSection::isNeeded() const {
2305   assert(config->emitChainedFixups);
2306   // dyld always expects LC_DYLD_CHAINED_FIXUPS to point to a valid
2307   // dyld_chained_fixups_header, so we create this section even if there aren't
2308   // any fixups.
2309   return true;
2310 }
2311 
2312 void ChainedFixupsSection::addBinding(const Symbol *sym,
2313                                       const InputSection *isec, uint64_t offset,
2314                                       int64_t addend) {
2315   locations.emplace_back(isec, offset);
2316   int64_t outlineAddend = (addend < 0 || addend > 0xFF) ? addend : 0;
2317   auto [it, inserted] = bindings.insert(
2318       {{sym, outlineAddend}, static_cast<uint32_t>(bindings.size())});
2319 
2320   if (inserted) {
2321     symtabSize += sym->getName().size() + 1;
2322     hasWeakBind = hasWeakBind || needsWeakBind(*sym);
2323     if (!isInt<23>(outlineAddend))
2324       needsLargeAddend = true;
2325     else if (outlineAddend != 0)
2326       needsAddend = true;
2327   }
2328 }
2329 
2330 std::pair<uint32_t, uint8_t>
2331 ChainedFixupsSection::getBinding(const Symbol *sym, int64_t addend) const {
2332   int64_t outlineAddend = (addend < 0 || addend > 0xFF) ? addend : 0;
2333   auto it = bindings.find({sym, outlineAddend});
2334   assert(it != bindings.end() && "binding not found in the imports table");
2335   if (outlineAddend == 0)
2336     return {it->second, addend};
2337   return {it->second, 0};
2338 }
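
// For example (hypothetical bindings): an addend of 5 fits the 8-bit inline
// addend of the fixup pointer (hence the 0xFF check), so the binding is keyed
// as {sym, 0} and getBinding() returns {index, 5}. An addend of 0x123 is
// outlined into the imports table instead, and getBinding() returns
// {index, 0}.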
2339 
2340 static size_t writeImport(uint8_t *buf, int format, int16_t libOrdinal,
2341                           bool weakRef, uint32_t nameOffset, int64_t addend) {
2342   switch (format) {
2343   case DYLD_CHAINED_IMPORT: {
2344     auto *import = reinterpret_cast<dyld_chained_import *>(buf);
2345     import->lib_ordinal = libOrdinal;
2346     import->weak_import = weakRef;
2347     import->name_offset = nameOffset;
2348     return sizeof(dyld_chained_import);
2349   }
2350   case DYLD_CHAINED_IMPORT_ADDEND: {
2351     auto *import = reinterpret_cast<dyld_chained_import_addend *>(buf);
2352     import->lib_ordinal = libOrdinal;
2353     import->weak_import = weakRef;
2354     import->name_offset = nameOffset;
2355     import->addend = addend;
2356     return sizeof(dyld_chained_import_addend);
2357   }
2358   case DYLD_CHAINED_IMPORT_ADDEND64: {
2359     auto *import = reinterpret_cast<dyld_chained_import_addend64 *>(buf);
2360     import->lib_ordinal = libOrdinal;
2361     import->weak_import = weakRef;
2362     import->name_offset = nameOffset;
2363     import->addend = addend;
2364     return sizeof(dyld_chained_import_addend64);
2365   }
2366   default:
2367     llvm_unreachable("Unknown import format");
2368   }
2369 }
2370 
2371 size_t ChainedFixupsSection::SegmentInfo::getSize() const {
2372   assert(pageStarts.size() > 0 && "SegmentInfo for segment with no fixups?");
2373   return alignTo<8>(sizeof(dyld_chained_starts_in_segment) +
2374                     pageStarts.back().first * sizeof(uint16_t));
2375 }
2376 
2377 size_t ChainedFixupsSection::SegmentInfo::writeTo(uint8_t *buf) const {
2378   auto *segInfo = reinterpret_cast<dyld_chained_starts_in_segment *>(buf);
2379   segInfo->size = getSize();
2380   segInfo->page_size = target->getPageSize();
2381   // FIXME: Use DYLD_CHAINED_PTR_64_OFFSET on newer OS versions.
2382   segInfo->pointer_format = DYLD_CHAINED_PTR_64;
2383   segInfo->segment_offset = oseg->addr - in.header->addr;
2384   segInfo->max_valid_pointer = 0; // not used on 64-bit
2385   segInfo->page_count = pageStarts.back().first + 1;
2386 
2387   uint16_t *starts = segInfo->page_start;
2388   for (size_t i = 0; i < segInfo->page_count; ++i)
2389     starts[i] = DYLD_CHAINED_PTR_START_NONE;
2390 
2391   for (auto [pageIdx, startAddr] : pageStarts)
2392     starts[pageIdx] = startAddr;
2393   return segInfo->size;
2394 }
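
// For illustration, pageStarts == {{0, 0x10}, {2, 0x8}} produces
// page_count == 3 and page_start == {0x10, DYLD_CHAINED_PTR_START_NONE, 0x8}:
// page 1 has no fixups, while the chains on pages 0 and 2 start at offsets
// 0x10 and 0x8 respectively.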
2395 
2396 static size_t importEntrySize(int format) {
2397   switch (format) {
2398   case DYLD_CHAINED_IMPORT:
2399     return sizeof(dyld_chained_import);
2400   case DYLD_CHAINED_IMPORT_ADDEND:
2401     return sizeof(dyld_chained_import_addend);
2402   case DYLD_CHAINED_IMPORT_ADDEND64:
2403     return sizeof(dyld_chained_import_addend64);
2404   default:
2405     llvm_unreachable("Unknown import format");
2406   }
2407 }
2408 
2409 // This is step 3 of the algorithm described in the class comment of
2410 // ChainedFixupsSection.
2411 //
2412 // LC_DYLD_CHAINED_FIXUPS data consists of (in this order):
2413 // * A dyld_chained_fixups_header
2414 // * A dyld_chained_starts_in_image
2415 // * One dyld_chained_starts_in_segment per segment
2416 // * List of all imports (dyld_chained_import, dyld_chained_import_addend, or
2417 //   dyld_chained_import_addend64)
2418 // * Names of imported symbols
2419 void ChainedFixupsSection::writeTo(uint8_t *buf) const {
2420   auto *header = reinterpret_cast<dyld_chained_fixups_header *>(buf);
2421   header->fixups_version = 0;
2422   header->imports_count = bindings.size();
2423   header->imports_format = importFormat;
2424   header->symbols_format = 0;
2425 
2426   buf += alignTo<8>(sizeof(*header));
2427 
2428   auto curOffset = [&buf, &header]() -> uint32_t {
2429     return buf - reinterpret_cast<uint8_t *>(header);
2430   };
2431 
2432   header->starts_offset = curOffset();
2433 
2434   auto *imageInfo = reinterpret_cast<dyld_chained_starts_in_image *>(buf);
2435   imageInfo->seg_count = outputSegments.size();
2436   uint32_t *segStarts = imageInfo->seg_info_offset;
2437 
2438   // dyld_chained_starts_in_image ends in a flexible array member containing a
2439   // uint32_t for each segment. Leave room for it, and fill it via segStarts.
2440   buf += alignTo<8>(offsetof(dyld_chained_starts_in_image, seg_info_offset) +
2441                     outputSegments.size() * sizeof(uint32_t));
2442 
2443   // Initialize all offsets to 0, which indicates that the segment does not have
2444   // fixups. Those that do have them will be filled in below.
2445   for (size_t i = 0; i < outputSegments.size(); ++i)
2446     segStarts[i] = 0;
2447 
2448   for (const SegmentInfo &seg : fixupSegments) {
2449     segStarts[seg.oseg->index] = curOffset() - header->starts_offset;
2450     buf += seg.writeTo(buf);
2451   }
2452 
2453   // Write imports table.
2454   header->imports_offset = curOffset();
2455   uint64_t nameOffset = 0;
2456   for (auto [import, idx] : bindings) {
2457     const Symbol &sym = *import.first;
2458     buf += writeImport(buf, importFormat, ordinalForSymbol(sym),
2459                        sym.isWeakRef(), nameOffset, import.second);
2460     nameOffset += sym.getName().size() + 1;
2461   }
2462 
2463   // Write imported symbol names.
2464   header->symbols_offset = curOffset();
2465   for (auto [import, idx] : bindings) {
2466     StringRef name = import.first->getName();
2467     memcpy(buf, name.data(), name.size());
2468     buf += name.size() + 1; // account for null terminator
2469   }
2470 
2471   assert(curOffset() == getRawSize());
2472 }
2473 
2474 // This is step 2 of the algorithm described in the class comment of
2475 // ChainedFixupsSection.
2476 void ChainedFixupsSection::finalizeContents() {
2477   assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
2478   assert(config->emitChainedFixups);
2479 
2480   if (!isUInt<32>(symtabSize))
2481     error("cannot encode chained fixups: imported symbols table size " +
2482           Twine(symtabSize) + " exceeds 4 GiB");
2483 
2484   bool needsLargeOrdinal = any_of(bindings, [](const auto &p) {
2485     // 0xF1 - 0xFF are reserved for special ordinals in the 8-bit encoding.
2486     return ordinalForSymbol(*p.first.first) > 0xF0;
2487   });
2488 
2489   if (needsLargeAddend || !isUInt<23>(symtabSize) || needsLargeOrdinal)
2490     importFormat = DYLD_CHAINED_IMPORT_ADDEND64;
2491   else if (needsAddend)
2492     importFormat = DYLD_CHAINED_IMPORT_ADDEND;
2493   else
2494     importFormat = DYLD_CHAINED_IMPORT;
2495 
2496   for (Location &loc : locations)
2497     loc.offset =
2498         loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);
2499 
2500   llvm::sort(locations, [](const Location &a, const Location &b) {
2501     const OutputSegment *segA = a.isec->parent->parent;
2502     const OutputSegment *segB = b.isec->parent->parent;
2503     if (segA == segB)
2504       return a.offset < b.offset;
2505     return segA->addr < segB->addr;
2506   });
2507 
2508   auto sameSegment = [](const Location &a, const Location &b) {
2509     return a.isec->parent->parent == b.isec->parent->parent;
2510   };
2511 
2512   const uint64_t pageSize = target->getPageSize();
2513   for (size_t i = 0, count = locations.size(); i < count;) {
2514     const Location &firstLoc = locations[i];
2515     fixupSegments.emplace_back(firstLoc.isec->parent->parent);
2516     while (i < count && sameSegment(locations[i], firstLoc)) {
2517       uint32_t pageIdx = locations[i].offset / pageSize;
2518       fixupSegments.back().pageStarts.emplace_back(
2519           pageIdx, locations[i].offset % pageSize);
2520       ++i;
2521       while (i < count && sameSegment(locations[i], firstLoc) &&
2522              locations[i].offset / pageSize == pageIdx)
2523         ++i;
2524     }
2525   }
2526 
2527   // Compute expected encoded size.
2528   size = alignTo<8>(sizeof(dyld_chained_fixups_header));
2529   size += alignTo<8>(offsetof(dyld_chained_starts_in_image, seg_info_offset) +
2530                      outputSegments.size() * sizeof(uint32_t));
2531   for (const SegmentInfo &seg : fixupSegments)
2532     size += seg.getSize();
2533   size += importEntrySize(importFormat) * bindings.size();
2534   size += symtabSize;
2535 }
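
// For illustration, assuming a 16 KiB page size: fixups at segment offsets
// 0x10, 0x30, and 0x4008 yield pageStarts == {{0, 0x10}, {1, 0x8}}. Only the
// first fixup on each page is recorded here; the rest are reached by
// following the chain encoded in the fixed-up pointers themselves.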
2536 
2537 template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &);
2538 template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &);
2539